signature
stringlengths
43
39.1k
implementation
stringlengths
0
450k
public class CommandLine {
    /**
     * A method for reporting error messages in
     * {@link CommandLineListener#execute(CommandLineArgument[])} implementations.
     * It ensures that messages are written to the log file and/or written to
     * stderr as appropriate.
     *
     * @param str the error message
     */
    public static void error(String str) {
        switch (ZAP.getProcessType()) {
            case cmdline:
                // Command-line runs have no GUI, so echo the error to stderr.
                System.err.println(str);
                break;
            default:
                // Ignore
        }
        // Always write to the log
        logger.error(str);
    }
}
public class ISUPMessageFactoryImpl { /** * ( non - Javadoc ) * @ see org . restcomm . protocols . ss7 . isup . ISUPMessageFactory # createCQM ( ) */ @ Override public CircuitGroupQueryMessage createCQM ( ) { } }
CircuitGroupQueryMessage msg = new CircuitGroupQueryMessageImpl ( _CQM_HOLDER . mandatoryCodes , _CQM_HOLDER . mandatoryVariableCodes , _CQM_HOLDER . optionalCodes , _CQM_HOLDER . mandatoryCodeToIndex , _CQM_HOLDER . mandatoryVariableCodeToIndex , _CQM_HOLDER . optionalCodeToIndex ) ; return msg ;
public class ItemLink {
    /**
     * This method is called when a reference is being added by an active
     * transaction and when a reference is being restored. It should only be
     * called by the message store code.
     *
     * @throws SevereMessageStoreException if the reference count has already
     *         begun decreasing — no new references may be added after one has
     *         been removed
     */
    public final synchronized void incrementReferenceCount() throws SevereMessageStoreException {
        if (TraceComponent.isAnyTracingEnabled() && tc.isEntryEnabled())
            SibTr.entry(this, tc, "incrementReferenceCount");
        // Once the count starts going down it must never go back up; fail fast.
        if (_referenceCountIsDecreasing) {
            if (TraceComponent.isAnyTracingEnabled() && tc.isEventEnabled())
                SibTr.event(this, tc, "Cannot increment! Reference count has begun decreasing.");
            // Trace exit must be emitted before throwing so entry/exit pairs balance.
            if (TraceComponent.isAnyTracingEnabled() && tc.isEntryEnabled())
                SibTr.exit(this, tc, "incrementReferenceCount");
            throw new SevereMessageStoreException("Cannot add more references to an item after one has been removed");
        }
        _referenceCount++;
        if (TraceComponent.isAnyTracingEnabled() && tc.isEntryEnabled())
            SibTr.exit(this, tc, "incrementReferenceCount");
    }
}
public class BeanUtils { /** * get the getter method corresponding to given property */ public static Method getGetterPropertyMethod ( Class < ? > type , String propertyName ) { } }
String sourceMethodName = "get" + BeanUtils . capitalizePropertyName ( propertyName ) ; Method sourceMethod = BeanUtils . getMethod ( type , sourceMethodName ) ; if ( sourceMethod == null ) { sourceMethodName = "is" + BeanUtils . capitalizePropertyName ( propertyName ) ; sourceMethod = BeanUtils . getMethod ( type , sourceMethodName ) ; if ( sourceMethod != null && sourceMethod . getReturnType ( ) != Boolean . TYPE ) { sourceMethod = null ; } } return sourceMethod ;
public class CachedResourceBundlesHandler {
    /**
     * Streams the gzipped bundle to the given output stream, serving the bytes
     * from the cache when present and populating the cache on a miss.
     *
     * @see net.jawr.web.resource.bundle.ResourceBundlesHandler#streamBundleTo(java.lang.String, java.io.OutputStream)
     */
    @Override
    public void streamBundleTo(String bundlePath, OutputStream out) throws ResourceNotFoundException {
        try {
            // byte[] gzip = gzipCache.get(bundlePath);
            byte[] gzip = (byte[]) cacheMgr.get(ZIP_CACHE_PREFIX + bundlePath);
            // If it's not cached yet
            if (null == gzip) {
                // Stream the stored data
                ByteArrayOutputStream baOs = new ByteArrayOutputStream();
                BufferedOutputStream bfOs = new BufferedOutputStream(baOs);
                rsHandler.streamBundleTo(bundlePath, bfOs);
                // Copy the data into the ByteBuffer
                // close() flushes the buffered bytes into baOs before snapshot.
                bfOs.close();
                gzip = baOs.toByteArray();
                // Cache the byte array
                cacheMgr.put(ZIP_CACHE_PREFIX + bundlePath, gzip);
            }
            // Write bytes to the outputstream
            IOUtils.write(gzip, out);
        } catch (IOException e) {
            throw new BundlingProcessException("Unexpected IOException writing bundle[" + bundlePath + "]", e);
        }
    }
}
public class TransactionQueue {
    /**
     * Understands and applies the following integer properties.
     * <ul>
     * <li>max.size - setMaximumSize
     * <li>max.threads - setMaximumThreads
     * <li>timeout.idle - setIdleTimeout
     * <li>timeout.transaction - setTransactionTimeout
     * <li>tune.size - Automatically tunes queue size when "true" and
     * transaction timeout set.
     * <li>tune.threads - Automatically tunes maximum thread count.
     * </ul>
     *
     * @param properties source of configuration values; absent keys leave the
     *        corresponding setting untouched
     */
    public synchronized void applyProperties(PropertyMap properties) {
        if (properties.containsKey("max.size")) {
            setMaximumSize(properties.getInt("max.size"));
        }
        if (properties.containsKey("max.threads")) {
            setMaximumThreads(properties.getInt("max.threads"));
        }
        if (properties.containsKey("timeout.idle")) {
            setIdleTimeout(properties.getNumber("timeout.idle").longValue());
        }
        if (properties.containsKey("timeout.transaction")) {
            setTransactionTimeout(properties.getNumber("timeout.transaction").longValue());
        }
        // Tuners are installed as listeners rather than applied once.
        if ("true".equalsIgnoreCase(properties.getString("tune.size"))) {
            addTransactionQueueListener(new TransactionQueueSizeTuner());
        }
        if ("true".equalsIgnoreCase(properties.getString("tune.threads"))) {
            addTransactionQueueListener(new TransactionQueueThreadTuner());
        }
    }
}
public class BooleanUtilities { /** * Given a boolean in string format , it checks if it ' s ' true ' or ' false ' ( case insensitive ) * @ param booleanStr the string to check * @ return true if booleanStr is ' true ' or ' false ' otherwise false */ public static boolean isValid ( @ Nullable final String booleanStr ) { } }
if ( StringUtils . isBlank ( booleanStr ) ) { return false ; } final String lowerCaseBoolean = getLowerCaseString ( booleanStr ) ; return lowerCaseBoolean . equals ( BooleanValues . TRUE ) || lowerCaseBoolean . equals ( BooleanValues . FALSE ) ;
public class Configuration { /** * Returns the value associated with the given key as a long . * @ param key * the key pointing to the associated value * @ param defaultValue * the default value which is returned in case there is no value associated with the given key * @ return the ( default ) value associated with the given key */ public long getLong ( String key , long defaultValue ) { } }
Object o = getRawValue ( key ) ; if ( o == null ) { return defaultValue ; } return convertToLong ( o , defaultValue ) ;
public class BeanPath {
    /**
     * Create a new List typed path.
     *
     * @param <A> element type
     * @param <E> expression type
     * @param property property name
     * @param type property type
     * @param queryType expression type
     * @param inits path initializers for the element expression
     * @return property path
     */
    @SuppressWarnings("unchecked")
    protected <A, E extends SimpleExpression<? super A>> ListPath<A, E> createList(String property, Class<? super A> type, Class<? super E> queryType, PathInits inits) {
        // Raw cast is unavoidable: queryType is declared with a super-wildcard
        // but ListPath expects the element expression class itself.
        return add(new ListPath<A, E>(type, (Class) queryType, forProperty(property), inits));
    }
}
public class Matrix2D { /** * Multiply the x and y coordinates of a Vertex against this matrix . */ public Vector3D mult ( Vector3D source ) { } }
Vector3D result = new Vector3D ( ) ; result . setX ( m00 * source . getX ( ) + m01 * source . getY ( ) + m02 ) ; result . setY ( m10 * source . getX ( ) + m11 * source . getY ( ) + m12 ) ; return result ;
public class HSQLDBSingleDbJDBCConnection { /** * { @ inheritDoc } */ @ Override protected ResultSet findLastOrderNumber ( int localMaxOrderNumber , boolean increment ) throws SQLException { } }
if ( findLastOrderNumber == null ) { findLastOrderNumber = dbConnection . prepareStatement ( FIND_LAST_ORDER_NUMBER ) ; } if ( ! increment ) { ResultSet count ; int result = - 1 ; while ( result < localMaxOrderNumber - 1 ) { count = findLastOrderNumber . executeQuery ( ) ; if ( count . next ( ) ) { result = count . getInt ( 1 ) ; } } } return findLastOrderNumber . executeQuery ( ) ;
public class ClassGraph {
    /**
     * Convert the class name into a corresponding URL.
     *
     * @param cd the class documentation node
     * @param rootClass whether this is the root class of a context/package diagram
     * @return the (possibly relative) URL of the class page
     */
    public String classToUrl(ClassDoc cd, boolean rootClass) {
        // Context and package diagrams link via a relative path.
        if (rootClass && contextPackageName != null) {
            final String relative = buildRelativePathFromClassNames(contextPackageName, cd.containingPackage().name());
            return relative + cd.name() + ".html";
        }
        return classToUrl(cd.qualifiedName());
    }
}
public class HexUtils { /** * Read a hex string of bits and write it into a bitset * @ param s hex string of the stored bits * @ param ba the bitset to store the bits in * @ param length the maximum number of bits to store */ public static void hexToBits ( String s , BitSet ba , int length ) { } }
byte [ ] b = hexToBytes ( s ) ; bytesToBits ( b , ba , length ) ;
public class MapStoreWrapper {
    /**
     * Returns an {@link Iterable} of all keys or {@code null}
     * if a map loader is not configured for this map.
     * {@inheritDoc}
     */
    @Override
    public Iterable<Object> loadAllKeys() {
        if (isMapLoader()) {
            Iterable<Object> allKeys;
            try {
                allKeys = mapLoader.loadAllKeys();
            } catch (AbstractMethodError e) {
                // AbstractMethodError means the loader was compiled against an
                // older interface version without loadAllKeys().
                // Invoke reflectively to preserve backwards binary compatibility. Removable in v4.x
                allKeys = ReflectionHelper.invokeMethod(mapLoader, "loadAllKeys");
            }
            return allKeys;
        }
        return null;
    }
}
public class ObservableObjectValueAssert {
    /**
     * Verifies that the actual observable has the same value as the given observable.
     *
     * @param expectedValue the observable value to compare with the actual observable's current value.
     * @return {@code this} assertion instance.
     */
    public ObservableObjectValueAssert<T> hasSameValue(ObservableObjectValue<T> expectedValue) {
        // Delegate the comparison to the shared assertions helper.
        new ObservableValueAssertions<>(actual).hasSameValue(expectedValue);
        return this;
    }
}
public class CommerceCountryPersistenceImpl {
    /**
     * Returns the last commerce country in the ordered set where groupId = &#63; and billingAllowed = &#63; and active = &#63;.
     *
     * @param groupId the group ID
     * @param billingAllowed the billing allowed
     * @param active the active
     * @param orderByComparator the comparator to order the set by (optionally <code>null</code>)
     * @return the last matching commerce country
     * @throws NoSuchCountryException if a matching commerce country could not be found
     */
    @Override
    public CommerceCountry findByG_B_A_Last(long groupId, boolean billingAllowed, boolean active, OrderByComparator<CommerceCountry> orderByComparator) throws NoSuchCountryException {
        CommerceCountry commerceCountry = fetchByG_B_A_Last(groupId, billingAllowed, active, orderByComparator);
        if (commerceCountry != null) {
            return commerceCountry;
        }
        // Not found: build the exception message. The StringBundler is
        // presized to exactly the 8 appends below.
        StringBundler msg = new StringBundler(8);
        msg.append(_NO_SUCH_ENTITY_WITH_KEY);
        msg.append("groupId=");
        msg.append(groupId);
        msg.append(", billingAllowed=");
        msg.append(billingAllowed);
        msg.append(", active=");
        msg.append(active);
        msg.append("}");
        throw new NoSuchCountryException(msg.toString());
    }
}
public class Filters {
    /**
     * Applies the given label to all cells in the output row. This allows the caller to determine
     * which results were produced from which part of the filter.
     *
     * <p>Due to a technical limitation, it is not currently possible to apply multiple labels to a
     * cell. As a result, a {@link ChainFilter} may have no more than one sub-filter which contains a
     * label. It is okay for an {@link InterleaveFilter} to contain multiple labels, as they will be
     * applied to separate copies of the input. This may be relaxed in the future.
     *
     * @param label the label to apply; must not be null
     * @return a pass-through filter that tags emitted cells with the label
     */
    public Filter label(@Nonnull String label) {
        Preconditions.checkNotNull(label);
        return new SimpleFilter(RowFilter.newBuilder().setApplyLabelTransformer(label).build());
    }
}
public class DescriptionBuilder {
    /**
     * @return the built description
     * @throws IllegalStateException if no name has been set
     */
    public Description build() {
        if (null == name) {
            throw new IllegalStateException("name is not set");
        }
        // Fall back to the name when no explicit title was provided.
        final String title1 = null != title ? title : name;
        final List<Property> properties1 = buildProperties();
        // NOTE(review): unmodifiableMap wraps the live builder maps — later
        // builder mutations would still show through the returned Description;
        // confirm the builder is not reused after build().
        final Map<String, String> mapping1 = Collections.unmodifiableMap(mapping);
        final Map<String, String> mapping2 = Collections.unmodifiableMap(fwkmapping);
        final Map<String, String> metadata2 = Collections.unmodifiableMap(metadata);
        return new Description() {
            @Override
            public String getName() {
                return name;
            }

            @Override
            public String getTitle() {
                return title1;
            }

            @Override
            public String getDescription() {
                return description;
            }

            @Override
            public List<Property> getProperties() {
                return properties1;
            }

            @Override
            public Map<String, String> getPropertiesMapping() {
                return mapping1;
            }

            @Override
            public Map<String, String> getFwkPropertiesMapping() {
                return mapping2;
            }

            @Override
            public Map<String, String> getMetadata() {
                return metadata2;
            }

            @Override
            public String toString() {
                // NOTE(review): ", " separators are missing before
                // "frameworkMapping = " and "metadata = " below.
                return "PropertyDescription{" + "name = " + getName() + ", " + "title = " + getTitle() + ", "
                        + "description = " + getDescription() + ", " + "properties = " + getProperties() + ", "
                        + "mapping = " + getPropertiesMapping() + "frameworkMapping = " + getFwkPropertiesMapping()
                        + "metadata = " + metadata2 + "}";
            }
        };
    }
}
public class Query {
    /**
     * Returns tweets by users located within a given radius of the given
     * latitude/longitude, where the user's location is taken from their
     * Twitter profile.
     *
     * @param location geo location
     * @param radius radius
     * @param unit Query.MILES or Query.KILOMETERS
     * @since Twitter4J 4.0.1
     */
    public void setGeoCode(GeoLocation location, double radius, Unit unit) {
        // Geocode parameter format is "<lat>,<long>,<radius><unit>"; the unit
        // name is appended directly after the radius with no separator.
        this.geocode = location.getLatitude() + "," + location.getLongitude() + "," + radius + unit.name();
    }
}
public class RetryPolicy {
    /**
     * Special case during shutdown.
     *
     * @param e possible instance of, or has cause for, an InterruptedException
     * @return true if it is transitively an InterruptedException
     */
    private boolean isInterruptTransitively(Throwable e) {
        // Walk the cause chain, starting with the throwable itself.
        for (Throwable current = e; current != null; current = current.getCause()) {
            if (current instanceof InterruptedException) {
                return true;
            }
        }
        return false;
    }
}
public class LdapURL { /** * Returns the search scope used in LDAP search */ public int get_searchScope ( ) { } }
int searchScope = SearchControls . OBJECT_SCOPE ; String scopeBuf = get_scope ( ) ; if ( scopeBuf != null ) { if ( scopeBuf . compareToIgnoreCase ( "base" ) == 0 ) { searchScope = SearchControls . OBJECT_SCOPE ; } else if ( scopeBuf . compareToIgnoreCase ( "one" ) == 0 ) { searchScope = SearchControls . ONELEVEL_SCOPE ; } else if ( scopeBuf . compareToIgnoreCase ( "sub" ) == 0 ) { searchScope = SearchControls . SUBTREE_SCOPE ; } } return searchScope ;
public class CmsGalleryService {
    /**
     * Generates a list of beans for all available content types.<p>
     *
     * (The previous comment described a map keyed by resource type name; the
     * method actually returns a list of {@link CmsResourceTypeBean}.)
     *
     * @param types the resource types
     * @param creatableTypes the creatable types
     * @return the list containing the available resource types
     */
    private List<CmsResourceTypeBean> buildTypesList(List<I_CmsResourceType> types, Set<String> creatableTypes) {
        ArrayList<CmsResourceTypeBean> list = new ArrayList<CmsResourceTypeBean>();
        if (types == null) {
            return list;
        }
        Map<I_CmsResourceType, I_CmsPreviewProvider> typeProviderMapping = getPreviewProviderForTypes(types);
        Iterator<I_CmsResourceType> it = types.iterator();
        while (it.hasNext()) {
            I_CmsResourceType type = it.next();
            try {
                CmsResourceTypeBean bean = createTypeBean(type, typeProviderMapping.get(type), creatableTypes.contains(type.getTypeName()));
                list.add(bean);
            } catch (Exception e) {
                // A single broken type must not abort the whole list build;
                // log it and continue with the remaining types.
                if (type != null) {
                    log(Messages.get().getBundle(getWorkplaceLocale()).key(Messages.ERR_BUILD_TYPE_LIST_1, type.getTypeName()), e);
                }
            }
        }
        return list;
    }
}
public class MatrixVectorMult_DDRM {
    /**
     * Performs a matrix vector multiply.<br>
     * <br>
     * c = A * b <br>
     * and<br>
     * c = A * b<sup>T</sup> <br>
     * <br>
     * c<sub>i</sub> = Sum{ j=1:n, a<sub>ij</sub> * b<sub>j</sub>}<br>
     * <br>
     * where A is a matrix, b is a column or transposed row vector, and c is a column vector.
     *
     * @param A A matrix that is m by n. Not modified.
     * @param B A vector that has length n. Not modified.
     * @param C A column vector that has length m. Modified.
     */
    public static void mult(DMatrix1Row A, DMatrixD1 B, DMatrixD1 C) {
        // B may be a row vector (1 x n) or a column vector (n x 1); either way
        // its length must match A's column count.
        if (B.numRows == 1) {
            if (A.numCols != B.numCols) {
                throw new MatrixDimensionException("A and B are not compatible");
            }
        } else if (B.numCols == 1) {
            if (A.numCols != B.numRows) {
                throw new MatrixDimensionException("A and B are not compatible");
            }
        } else {
            throw new MatrixDimensionException("B is not a vector");
        }
        C.reshape(A.numRows, 1);
        if (A.numCols == 0) {
            // Empty inner dimension: the product is defined as the zero vector.
            CommonOps_DDRM.fill(C, 0);
            return;
        }
        int indexA = 0;
        int cIndex = 0;
        // The first column term seeds the accumulator (inner loop starts at
        // j = 1), avoiding an extra add of 0 per row.
        double b0 = B.get(0);
        for (int i = 0; i < A.numRows; i++) {
            double total = A.get(indexA++) * b0;
            for (int j = 1; j < A.numCols; j++) {
                total += A.get(indexA++) * B.get(j);
            }
            C.set(cIndex++, total);
        }
    }
}
public class BadRequest {
    /**
     * <pre>
     * Describes all violations in a client request.
     * </pre>
     *
     * <code>repeated .google.rpc.BadRequest.FieldViolation field_violations = 1;</code>
     *
     * @param index position in the repeated field
     * @return the message-or-builder view of the violation at that index
     */
    public com.google.rpc.BadRequest.FieldViolationOrBuilder getFieldViolationsOrBuilder(int index) {
        // Protobuf-generated accessor for the repeated field.
        return fieldViolations_.get(index);
    }
}
public class ThresholdMarshaller {
    /**
     * Marshall the given parameter object.
     *
     * @param threshold the object to marshal; must not be {@code null}
     * @param protocolMarshaller the target protocol marshaller
     */
    public void marshall(Threshold threshold, ProtocolMarshaller protocolMarshaller) {
        if (threshold == null) {
            throw new SdkClientException("Invalid argument passed to marshall(...)");
        }
        try {
            // Marshal each field against its declared binding.
            protocolMarshaller.marshall(threshold.getComparison(), COMPARISON_BINDING);
            protocolMarshaller.marshall(threshold.getThresholdValue(), THRESHOLDVALUE_BINDING);
        } catch (Exception e) {
            // Wrap with the original as cause so the failure is diagnosable.
            throw new SdkClientException("Unable to marshall request to JSON: " + e.getMessage(), e);
        }
    }
}
public class SessionManager {
    /**
     * Not called from XD.
     *
     * @param id the session identifier
     * @return the result of {@code getSession(id, 0, false, null)} cast to {@link ISession}
     */
    @Override
    public ISession getISession(String id) {
        // Delegates with fixed arguments (0, false, null) — see getSession for
        // their meaning.
        return (ISession) getSession(id, 0, false, null);
    }
}
public class ChunkFetchSuccess { /** * Decoding uses the given ByteBuf as our data , and will retain ( ) it . */ public static ChunkFetchSuccess decode ( ByteBuf buf ) { } }
StreamChunkId streamChunkId = StreamChunkId . decode ( buf ) ; buf . retain ( ) ; NettyManagedBuffer managedBuf = new NettyManagedBuffer ( buf . duplicate ( ) ) ; return new ChunkFetchSuccess ( streamChunkId , managedBuf ) ;
public class SF424V2_1Generator {
    /**
     * This method creates {@link XmlObject} of type {@link SF42421Document} by
     * populating data from the given {@link ProposalDevelopmentDocumentContract}.
     *
     * @param proposalDevelopmentDocument for which the {@link XmlObject} needs to be created
     * @return {@link XmlObject} which is generated using the given {@link ProposalDevelopmentDocumentContract}
     */
    @Override
    public SF42421Document getFormObject(ProposalDevelopmentDocumentContract proposalDevelopmentDocument) {
        // Stash the document and its departmental contact as instance state
        // consumed by getSF42421Doc().
        this.pdDoc = proposalDevelopmentDocument;
        aorInfo = departmentalPersonService.getDepartmentalPerson(pdDoc);
        return getSF42421Doc();
    }
}
public class AbstractControllerServer {
    /**
     * {@inheritDoc}
     *
     * @param consumer {@inheritDoc}
     * @return {@inheritDoc}
     */
    @Override
    public ClosableDataBuilder<MB> getDataBuilder(final Object consumer, final boolean notifyChange) {
        // Fresh builder per call, bound to this server's builder setup.
        return new ClosableDataBuilder<>(getBuilderSetup(), consumer, notifyChange);
    }
}
public class LazyJobLogger {
    /**
     * Checks whether the amount of log entries buffered in memory exceeds the
     * threshold; if so, triggers a batched flush to disk.
     */
    private void checkCapacity() {
        if (memoryQueue.size() > maxMemoryLogSize) {
            // Over the threshold: a batched flush to disk is needed.
            if (flushing.compareAndSet(false, true)) {
                // A raw `new Thread` is acceptable here: the CAS above
                // guarantees at most one flusher thread exists at a time.
                new Thread(new Runnable() {
                    @Override
                    public void run() {
                        try {
                            checkAndFlush();
                        } catch (Throwable t) {
                            LOGGER.error("Capacity full flush error", t);
                        }
                    }
                }).start();
            }
        }
    }
}
public class CmsMessageWidget { /** * Sets the icon CSS class . < p > * @ param icon the icon * @ param color the icon color */ public void setIcon ( FontOpenCms icon , String color ) { } }
if ( icon != null ) { m_iconCell . setInnerHTML ( icon . getHtml ( 32 , color ) ) ; } else { m_iconCell . setInnerHTML ( "" ) ; }
public class DSIdGenerator { /** * ( non - Javadoc ) * @ see * com . impetus . kundera . generator . AutoGenerator # generate ( com . impetus . kundera * . client . Client , java . lang . Object ) */ @ Override public Object generate ( Client < ? > client , String dataType ) { } }
final String generatedId = "Select now() from system_schema.columns" ; ResultSet rSet = ( ( DSClient ) client ) . execute ( generatedId , null ) ; UUID uuid = rSet . iterator ( ) . next ( ) . getUUID ( 0 ) ; return uuid ;
public class CreateDedicatedIpPoolRequestMarshaller {
    /**
     * Marshall the given parameter object.
     *
     * @param createDedicatedIpPoolRequest the request to marshal; must not be {@code null}
     * @param protocolMarshaller the target protocol marshaller
     */
    public void marshall(CreateDedicatedIpPoolRequest createDedicatedIpPoolRequest, ProtocolMarshaller protocolMarshaller) {
        if (createDedicatedIpPoolRequest == null) {
            throw new SdkClientException("Invalid argument passed to marshall(...)");
        }
        try {
            // Marshal each field against its declared binding.
            protocolMarshaller.marshall(createDedicatedIpPoolRequest.getPoolName(), POOLNAME_BINDING);
            protocolMarshaller.marshall(createDedicatedIpPoolRequest.getTags(), TAGS_BINDING);
        } catch (Exception e) {
            // Wrap with the original as cause so the failure is diagnosable.
            throw new SdkClientException("Unable to marshall request to JSON: " + e.getMessage(), e);
        }
    }
}
public class Pair { /** * Gets a mapped presentation of the pair . * @ return mapped presentation of the pair . * @ since v1.1.0 */ public Map < K , V > toMap ( ) { } }
Map < K , V > result = new HashMap < K , V > ( ) ; result . put ( key , value ) ; return result ;
public class HeterogeneousMixture {
    /**
     * From superclass: seeds the vapor phase with the estimated fractions and
     * the mixture with the estimated pressure, then delegates to the
     * subclass-specific bubble-pressure calculation.
     *
     * @param pressureEstimate initial pressure guess
     * @param vaporFractionsEstimate estimated vapor fraction per compound name
     * @return the result of {@code bubblePressureImpl()}
     */
    public final int bubblePressure(double pressureEstimate, Map<String, Double> vaporFractionsEstimate) {
        for (Compound c : components) {
            // NOTE(review): assumes every component name has a map entry — a
            // missing key would NPE on unboxing; confirm with callers.
            double fraction = vaporFractionsEstimate.get(c.getName());
            getVapor().setFraction(c, fraction);
        }
        setPressure(pressureEstimate);
        return bubblePressureImpl();
    }
}
public class ExpressRoutePortsInner {
    /**
     * Deletes the specified ExpressRoutePort resource.
     *
     * @param resourceGroupName The name of the resource group.
     * @param expressRoutePortName The name of the ExpressRoutePort resource.
     * @throws IllegalArgumentException thrown if parameters fail the validation
     * @throws CloudException thrown if the request is rejected by server
     * @throws RuntimeException all other wrapped checked exceptions if the request fails to be sent
     */
    public void beginDelete(String resourceGroupName, String expressRoutePortName) {
        // Block on the async call; body() is invoked only to surface errors.
        beginDeleteWithServiceResponseAsync(resourceGroupName, expressRoutePortName).toBlocking().single().body();
    }
}
public class ArrayUtils { /** * originally licensed under ASL 2.0 */ public static Object [ ] subarray ( Object [ ] array , int startIndexInclusive , int endIndexExclusive ) { } }
int newSize = endIndexExclusive - startIndexInclusive ; Class < ? > type = array . getClass ( ) . getComponentType ( ) ; if ( newSize <= 0 ) { return ( Object [ ] ) Array . newInstance ( type , 0 ) ; } Object [ ] subarray = ( Object [ ] ) Array . newInstance ( type , newSize ) ; System . arraycopy ( array , startIndexInclusive , subarray , 0 , newSize ) ; return subarray ;
public class Snappy {
    /**
     * Uncompress the content in the input buffer. The uncompressed data is
     * written to the output buffer.
     *
     * Note that if you pass the wrong data or the range [inputOffset,
     * inputOffset + inputLength) that cannot be uncompressed, your JVM might
     * crash due to the access violation exception issued in the native code
     * written in C++. To avoid this type of crash, use
     * {@link #isValidCompressedBuffer(byte[], int, int)} first.
     *
     * @param input
     * @param inputOffset
     * @param inputLength
     * @param output
     * @param outputOffset
     * @return the byte size of the uncompressed data
     * @throws IOException
     */
    public static int uncompress(byte[] input, int inputOffset, int inputLength, byte[] output, int outputOffset) throws IOException {
        // Thin wrapper over the native implementation — see the javadoc
        // warning about validating the buffer first.
        return rawUncompress(input, inputOffset, inputLength, output, outputOffset);
    }
}
public class HttpConnection {
    /**
     * Records the start of a request for statistics: closes out any still-open
     * request timing, bumps the request counter, timestamps the new request,
     * and notifies the server-level stats.
     */
    protected void statsRequestStart() {
        if (_statsOn) {
            // A positive _reqTime means the previous request was never closed out.
            if (_reqTime > 0)
                statsRequestEnd();
            _requests++;
            _tmpTime = _request.getTimeStamp();
            _reqTime = _tmpTime;
            _httpServer.statsGotRequest();
        }
    }
}
public class Similarity {
    /**
     * Computes the Spearman rank correlation coefficient for the two {@code
     * DoubleVector} instances.
     *
     * <p>This implementation properly accounts for ties according to the
     * procedure specified in <i>Nonparametric Statistics for The Behavioral
     * Sciences</i> by Sidney Siegel and N. John Castellan Jr. Second
     * Ed. (1988).
     *
     * @throws IllegalArgumentException when the length of the two vectors are
     *         not the same.
     */
    public static double spearmanRankCorrelationCoefficient(DoubleVector a, DoubleVector b) {
        // NOTE: should this code ever be on the critical path, we should
        // re-implement it to operate on the Vector instances themselves
        return spearmanRankCorrelationCoefficient(a.toArray(), b.toArray());
    }
}
public class Calc { /** * Multiply elements of a by s ( in place ) * @ param a * @ param s * @ return the modified a */ public static Atom scaleEquals ( Atom a , double s ) { } }
double x = a . getX ( ) ; double y = a . getY ( ) ; double z = a . getZ ( ) ; x *= s ; y *= s ; z *= s ; // Atom b = new AtomImpl ( ) ; a . setX ( x ) ; a . setY ( y ) ; a . setZ ( z ) ; return a ;
public class InstallerModule {
    /**
     * Performs classpath scan to find all classes implementing
     * {@link FeatureInstaller}, or uses only manually configured installers.
     *
     * @return list of found installers or empty list
     */
    @SuppressWarnings("unchecked")
    private List<Class<? extends FeatureInstaller>> findInstallers() {
        if (scanner != null) {
            final List<Class<? extends FeatureInstaller>> installers = Lists.newArrayList();
            scanner.scan(new ClassVisitor() {
                @Override
                public void visit(final Class<?> type) {
                    if (FeatureUtils.is(type, FeatureInstaller.class)) {
                        installers.add((Class<? extends FeatureInstaller>) type);
                    }
                }
            });
            // sort to unify registration order on different systems
            installers.sort(Comparator.comparing(Class::getName));
            context.registerInstallersFromScan(installers);
        }
        // The context merges scan results with manual registrations and
        // exposes only the enabled installers.
        final List<Class<? extends FeatureInstaller>> installers = context.getEnabledInstallers();
        installers.sort(COMPARATOR);
        logger.debug("Found {} installers", installers.size());
        return installers;
    }
}
public class Actions {
    /**
     * Converts an {@link Action4} to a function that calls the action and returns a specified value.
     *
     * @param action the {@link Action4} to convert
     * @param result the value to return from the function call
     * @return a {@link Func4} that calls {@code action} and returns {@code result}
     */
    public static <T1, T2, T3, T4, R> Func4<T1, T2, T3, T4, R> toFunc(final Action4<T1, T2, T3, T4> action, final R result) {
        return new Func4<T1, T2, T3, T4, R>() {
            @Override
            public R call(T1 t1, T2 t2, T3 t3, T4 t4) {
                // Run the side-effecting action, then yield the fixed result.
                action.call(t1, t2, t3, t4);
                return result;
            }
        };
    }
}
public class CoreUserApiKeyAuthProviderClient { /** * Enables a user API key associated with the current user . * @ param id The id of the API key to enable . */ protected void enableApiKeyInternal ( final ObjectId id ) { } }
final StitchAuthRequest . Builder reqBuilder = new StitchAuthRequest . Builder ( ) ; reqBuilder . withMethod ( Method . PUT ) . withPath ( routes . getApiKeyEnableRouteForId ( id . toHexString ( ) ) ) . withRefreshToken ( ) ; getRequestClient ( ) . doAuthenticatedRequest ( reqBuilder . build ( ) ) ;
public class DonutOptions {
    /**
     * Creates the data displayed in the donut chart: one usage entry per
     * supported browser, in fixed display order.
     */
    private List<BrowserUsageData> getBrowserData() {
        List<BrowserUsageData> browserData = new ArrayList<BrowserUsageData>();
        browserData.add(getMSIEUsageData());
        browserData.add(getFirefoxUsageData());
        browserData.add(getChromeUsageData());
        browserData.add(getSafariUsageData());
        browserData.add(getOperaUsageData());
        return browserData;
    }
}
public class ComponentRegistry {
    /**
     * Register a component with FQCN only. This method will try to get the
     * class version using reflection!
     *
     * @param _string fqcn
     */
    public synchronized void registerComponent(String _string) {
        if (isIncluded(_string)) {
            Class<?> dummy;
            try {
                dummy = Class.forName(_string);
                String classVersion = getVersionWithReflection(dummy);
                // Only record components whose version could be determined.
                if (classVersion != null) {
                    componentVersions.put(_string, classVersion);
                }
            } catch (ClassNotFoundException ex) {
                // NOTE(review): this catch fires when the class itself cannot
                // be loaded, yet the trace message talks about getVersion —
                // consider rewording it.
                logger.trace("Unable to call getVersion on " + _string);
            }
        }
    }
}
public class PlainDate {
    /**
     * <p>Creates a new calendar date matching the given absolute time.</p>
     *
     * @param ut unix time
     * @param offset shift of local time relative to UTC
     * @return new calendar date
     */
    static PlainDate from(UnixTime ut, ZonalOffset offset) {
        long localSeconds = ut.getPosixTime() + offset.getIntegralAmount();
        int localNanos = ut.getNanosecond() + offset.getFractionalAmount();
        // Borrow/carry a whole second when the fractional offset pushes the
        // nanosecond component outside [0, 1_000_000_000).
        if (localNanos < 0) {
            localSeconds--;
        } else if (localNanos >= 1000000000) {
            localSeconds++;
        }
        // Floor division keeps pre-epoch instants on the correct (earlier)
        // day; the UNIX day number is then converted to Modified Julian Date.
        long mjd = EpochDays.MODIFIED_JULIAN_DATE.transform(MathUtils.floorDivide(localSeconds, 86400), EpochDays.UNIX);
        long packedDate = GregorianMath.toPackedDate(mjd);
        return PlainDate.of(GregorianMath.readYear(packedDate), GregorianMath.readMonth(packedDate), GregorianMath.readDayOfMonth(packedDate));
    }
}
public class DefaultSensorStorage {
    /**
     * Thread safe assuming that the issues for each file are only written once.
     */
    @Override
    public void store(DefaultExternalIssue externalIssue) {
        // A file carrying external issues must be published with the report.
        if (externalIssue.primaryLocation().inputComponent() instanceof DefaultInputFile) {
            DefaultInputFile defaultInputFile = (DefaultInputFile) externalIssue.primaryLocation().inputComponent();
            defaultInputFile.setPublished(true);
        }
        moduleIssues.initAndAddExternalIssue(externalIssue);
    }
}
public class CmsLockManager {
    /**
     * Removes a resource from the lock manager.<p>
     *
     * The forceUnlock option should be used with caution.<br>
     * forceUnlock will remove the lock by ignoring any rules which may cause wrong lock states.<p>
     *
     * @param dbc the current database context
     * @param resource the resource
     * @param forceUnlock <code>true</code>, if a resource is forced to get unlocked (only edition locks),
     *      no matter by which user and in which project the resource is currently locked
     * @param removeSystemLock <code>true</code>, if you also want to remove system locks
     * @return the previous {@link CmsLock} object of the resource,
     *      or <code>{@link CmsLock#getNullLock()}</code> if the resource was unlocked
     * @throws CmsException if something goes wrong
     */
    public CmsLock removeResource(CmsDbContext dbc, CmsResource resource, boolean forceUnlock, boolean removeSystemLock) throws CmsException {
        String resourcename = resource.getRootPath();
        CmsLock lock = getLock(dbc, resource).getEditionLock();
        // check some abort conditions first
        if (!lock.isNullLock()) {
            // the resource is locked by another user or in other project
            if (!forceUnlock && (!lock.isOwnedInProjectBy(dbc.currentUser(), dbc.currentProject()))) {
                throw new CmsLockException(Messages.get().container(Messages.ERR_RESOURCE_UNLOCK_1, dbc.removeSiteRoot(resourcename)));
            }
            // sub-resources of a locked folder can't be unlocked
            if (!forceUnlock && lock.isInherited()) {
                throw new CmsLockException(Messages.get().container(Messages.ERR_UNLOCK_LOCK_INHERITED_1, dbc.removeSiteRoot(resourcename)));
            }
        }
        // remove the lock and clean-up stuff
        if (lock.isExclusive()) {
            if (resource.isFolder()) {
                // in case of a folder, remove any exclusive locks on sub-resources that probably have
                // been upgraded from an inherited lock when the user edited a resource
                Iterator<CmsLock> itLocks = OpenCms.getMemoryMonitor().getAllCachedLocks().iterator();
                while (itLocks.hasNext()) {
                    String lockedPath = (itLocks.next()).getResourceName();
                    // prefix match, but not the folder itself
                    if (lockedPath.startsWith(resourcename) && !lockedPath.equals(resourcename)) {
                        // remove the exclusive locked sub-resource
                        unlockResource(lockedPath, false);
                    }
                }
            }
            if (removeSystemLock) {
                unlockResource(resourcename, true);
            }
            unlockResource(resourcename, false);
            return lock;
        }
        if (lock.getType().isSharedExclusive()) {
            List<String> locks = OpenCms.getMemoryMonitor().getAllCachedLockPaths();
            // when a resource with a shared lock gets unlocked, fetch all siblings of the resource
            // to the same content record to identify the exclusive locked sibling
            List<CmsResource> siblings = internalReadSiblings(dbc, resource);
            for (int i = 0; i < siblings.size(); i++) {
                CmsResource sibling = siblings.get(i);
                if (locks.contains(sibling.getRootPath())) {
                    // remove the exclusive locked sibling
                    if (removeSystemLock) {
                        unlockResource(sibling.getRootPath(), true);
                    }
                    unlockResource(sibling.getRootPath(), false);
                    break; // it can only be one!
                }
            }
            return lock;
        }
        // remove system locks only if explicit required
        if (removeSystemLock && !getLock(dbc, resource).getSystemLock().isUnlocked()) {
            return unlockResource(resourcename, true);
        }
        return lock;
    }
}
public class Ifc2x3tc1FactoryImpl { /** * < ! - - begin - user - doc - - > * < ! - - end - user - doc - - > * @ generated */ public IfcJunctionBoxTypeEnum createIfcJunctionBoxTypeEnumFromString ( EDataType eDataType , String initialValue ) { } }
IfcJunctionBoxTypeEnum result = IfcJunctionBoxTypeEnum . get ( initialValue ) ; if ( result == null ) throw new IllegalArgumentException ( "The value '" + initialValue + "' is not a valid enumerator of '" + eDataType . getName ( ) + "'" ) ; return result ;
public class NetworkInterfacesInner { /** * Gets information about all network interfaces in a virtual machine in a virtual machine scale set . * @ param nextPageLink The NextLink from the previous successful call to List operation . * @ throws IllegalArgumentException thrown if parameters fail the validation * @ throws CloudException thrown if the request is rejected by server * @ throws RuntimeException all other wrapped checked exceptions if the request fails to be sent * @ return the PagedList & lt ; NetworkInterfaceInner & gt ; object if successful . */ public PagedList < NetworkInterfaceInner > listVirtualMachineScaleSetVMNetworkInterfacesNext ( final String nextPageLink ) { } }
ServiceResponse < Page < NetworkInterfaceInner > > response = listVirtualMachineScaleSetVMNetworkInterfacesNextSinglePageAsync ( nextPageLink ) . toBlocking ( ) . single ( ) ; return new PagedList < NetworkInterfaceInner > ( response . body ( ) ) { @ Override public Page < NetworkInterfaceInner > nextPage ( String nextPageLink ) { return listVirtualMachineScaleSetVMNetworkInterfacesNextSinglePageAsync ( nextPageLink ) . toBlocking ( ) . single ( ) . body ( ) ; } } ;
public class BuildContext { /** * Retrieve and clear the relative root for this context . * @ param previousValue * previous value of the relative root to restore * @ return value of the relative root which was replaced */ public HashResource restoreRelativeRoot ( HashResource previousValue ) { } }
HashResource value = relativeRoot ; relativeRoot = previousValue ; return value ;
public class RtfByteArrayBuffer { /** * Copies the given byte to the internal buffer . * @ param b */ public void write ( final int b ) { } }
buffer [ pos ] = ( byte ) b ; size ++ ; if ( ++ pos == buffer . length ) flushBuffer ( ) ;
public class JsMessageFactoryImpl { /** * Utility method to extract the schema ids from a message buffer and , if a * message store is supplied , check that all the necessary schemas are available . * @ param buffer The buffer containing the schema ids * @ param offset The offset into the buffer where the schema ids start * @ param store The MesasgeStore from which the message is being recovered , may be null . * @ return int The offset in the buffer of the first byte after the schema ids * @ exception MessageRestoreFailedException is thrown if the necessary schemas are not available . */ private final static int ensureSchemasAvailable ( byte [ ] buffer , int offset , Object store ) throws MessageRestoreFailedException { } }
if ( TraceComponent . isAnyTracingEnabled ( ) && tc . isEntryEnabled ( ) ) SibTr . entry ( tc , "ensureSchemasAvailable" , new Object [ ] { offset , store } ) ; // If we have a message store we need to ensure all the schemas we ' ll // need to decode the message are restored from the store . int temp = ArrayUtil . readInt ( buffer , offset ) ; // the number of encoding Ids offset += ArrayUtil . INT_SIZE ; long [ ] decodeIds = new long [ temp ] ; for ( int i = 0 ; i < temp ; i ++ ) { decodeIds [ i ] = ArrayUtil . readLong ( buffer , offset ) ; // each encoding schema id offset += ArrayUtil . LONG_SIZE ; } if ( store != null && decodeIds . length > 0 ) { if ( ! ( store instanceof MessageStore ) ) throw new IllegalArgumentException ( "store is not a MessageStore instance" ) ; SchemaStore . loadSchemas ( ( MessageStore ) store , decodeIds ) ; } if ( TraceComponent . isAnyTracingEnabled ( ) && tc . isEntryEnabled ( ) ) SibTr . exit ( tc , "ensureSchemasAvailable" , offset ) ; return offset ;
public class Calendar { /** * Sets the given calendar field value and the time value * ( millisecond offset from the < a href = " # Epoch " > Epoch < / a > ) of * this < code > Calendar < / code > undefined . This means that { @ link * # isSet ( int ) isSet ( field ) } will return < code > false < / code > , and * the date and time calculations will treat the field as if it * had never been set . A < code > Calendar < / code > implementation * class may use the field ' s specific default value for date and * time calculations . * < p > The { @ link # HOUR _ OF _ DAY } , { @ link # HOUR } and { @ link # AM _ PM } * fields are handled independently and the < a * href = " # time _ resolution " > the resolution rule for the time of * day < / a > is applied . Clearing one of the fields doesn ' t reset * the hour of day value of this < code > Calendar < / code > . Use { @ link * # set ( int , int ) set ( Calendar . HOUR _ OF _ DAY , 0 ) } to reset the hour * value . * @ param field the calendar field to be cleared . * @ see # clear ( ) */ public final void clear ( int field ) { } }
fields [ field ] = 0 ; stamp [ field ] = UNSET ; isSet [ field ] = false ; areAllFieldsSet = areFieldsSet = false ; isTimeSet = false ;
public class CmsEmbeddedDialogsUI { /** * Returns the dialog id extracted from the requested path . < p > * @ param request the request * @ return the id */ private String getDialogId ( VaadinRequest request ) { } }
String path = request . getPathInfo ( ) ; // remove the leading slash return path != null ? path . substring ( 1 ) : null ;
public class ReferenceCountUtil { /** * Tries to call { @ link ReferenceCounted # touch ( ) } if the specified message implements { @ link ReferenceCounted } . * If the specified message doesn ' t implement { @ link ReferenceCounted } , this method does nothing . */ @ SuppressWarnings ( "unchecked" ) public static < T > T touch ( T msg ) { } }
if ( msg instanceof ReferenceCounted ) { return ( T ) ( ( ReferenceCounted ) msg ) . touch ( ) ; } return msg ;
public class NinjaEbeanServerLifecycle {
    /**
     * Reads the Ebean configuration properties from the application.conf file,
     * configures an EbeanServer accordingly and starts it.
     *
     * <p>Configuration covers the DDL options, the data source, and the set of
     * entity classes, which are discovered from packages and/or listed explicitly
     * via the models property.
     */
    public final void startServer() {
        logger.info("Starting Ebeans Module.");

        // Setup basic parameters. The defaults target an in-memory H2 database,
        // which matches the default URL and driver below.
        boolean ebeanDdlGenerate = ninjaProperties.getBooleanWithDefault(EBEAN_DDL_GENERATE, false);
        boolean ebeanDdlRun = ninjaProperties.getBooleanWithDefault(EBEAN_DDL_RUN, false);
        String ebeanDdlInitSql = ninjaProperties.get(EBEAN_DDL_INIT_SQL);
        String ebeanDdlSeedSql = ninjaProperties.get(EBEAN_DDL_SEED_SQL);
        String ebeanDatasourceName = ninjaProperties.getWithDefault(EBEAN_DATASOURCE_NAME, "default");
        String ebeanDatasourceUserName = ninjaProperties.getWithDefault(EBEAN_DATASOURCE_USERNAME, "test");
        String ebeanDatasourcePassword = ninjaProperties.getWithDefault(EBEAN_DATASOURCE_PASSWORD, "test");
        String ebeanDatasourceDatabaseUrl = ninjaProperties.getWithDefault(EBEAN_DATASOURCE_DATABASE_URL, "jdbc:h2:mem:tests;DB_CLOSE_DELAY=-1");
        String ebeanDatasourceDatabaseDriver = ninjaProperties.getWithDefault(EBEAN_DATASOURCE_DATABASE_DRIVER, "org.h2.Driver");
        int ebeanDatasourceMinConnections = ninjaProperties.getIntegerWithDefault(EBEAN_DATASOURCE_MIN_CONNECTIONS, 1);
        int ebeanDatasourceMaxConnections = ninjaProperties.getIntegerWithDefault(EBEAN_DATASOURCE_MAX_CONNECTIONS, 25);
        String ebeanDatasourceHeartbeatSql = ninjaProperties.getWithDefault(EBEAN_DATASOURCE_HEARTBEAT_SQL, "select 1");

        ServerConfig serverConfig = new ServerConfig();
        serverConfig.setName(ebeanDatasourceName);
        // NOTE(review): presumably merges additional settings from an ebean properties
        // file on the classpath — confirm against ServerConfig.loadFromProperties docs
        serverConfig.loadFromProperties();

        // Define DataSource parameters
        DataSourceConfig dataSourceConfig = new DataSourceConfig();
        dataSourceConfig.setDriver(ebeanDatasourceDatabaseDriver);
        dataSourceConfig.setUsername(ebeanDatasourceUserName);
        dataSourceConfig.setPassword(ebeanDatasourcePassword);
        dataSourceConfig.setUrl(ebeanDatasourceDatabaseUrl);
        dataSourceConfig.setMinConnections(ebeanDatasourceMinConnections);
        dataSourceConfig.setMaxConnections(ebeanDatasourceMaxConnections);
        dataSourceConfig.setHeartbeatSql(ebeanDatasourceHeartbeatSql);
        serverConfig.setDataSourceConfig(dataSourceConfig);

        // set DDL options...
        serverConfig.setDdlGenerate(ebeanDdlGenerate);
        serverConfig.setDdlRun(ebeanDdlRun);
        serverConfig.setDdlInitSql(ebeanDdlInitSql);
        serverConfig.setDdlSeedSql(ebeanDdlSeedSql);

        serverConfig.setDefaultServer(true);
        serverConfig.setRegister(true);

        // split models configuration into classes & packages
        Set<String> packageNames = new LinkedHashSet<>();
        Set<Class<?>> entityClasses = new LinkedHashSet<>();

        // models always added by default
        packageNames.add("models");

        // add manually listed classes from the property
        String[] manuallyListedModels = ninjaProperties.getStringArray(EBEAN_MODELS);
        if (manuallyListedModels != null) {
            for (String model : manuallyListedModels) {
                if (model.endsWith(".*")) {
                    // strip off .* at end — the entry names a package, not a class
                    String packageName = model.substring(0, model.length() - 2);
                    packageNames.add(packageName);
                } else {
                    try {
                        entityClasses.add(Class.forName(model));
                    } catch (ClassNotFoundException e) {
                        throw new RuntimeException("Configuration error. Class listed/discovered via " + EBEAN_MODELS + " not found: " + model);
                    }
                }
            }
        }

        // if any packages were specified the reflections library MUST be available
        if (!packageNames.isEmpty()) {
            for (String packageName : packageNames) {
                Set<String> packageClasses = ReflectionsHelper.findAllClassesInPackage(packageName);
                logger.info("Searched and found " + packageClasses.size() + " classes in package " + packageName);
                for (String packageClass : packageClasses) {
                    try {
                        entityClasses.add(Class.forName(packageClass));
                    } catch (ClassNotFoundException e) {
                        // should be impossible since Reflections just found 'em
                        throw new RuntimeException("Something fishy happenend. Unable to find class " + packageClass);
                    }
                }
            }
        }

        // register every discovered entity class with the server configuration
        for (Class<?> entityClass : entityClasses) {
            serverConfig.addClass(entityClass);
        }

        // create the EbeanServer instance
        ebeanServer = createEbeanServer(serverConfig);

        // Activate the Ebean shutdown manager (disconnects from db, shuts down all threads and so on)
        ShutdownManager.touch();
    }
}
public class FacebookAlbumListFragment {
    /**
     * Asynchronously requests the Page accounts associated with the linked account.
     * For each Page on which the current user may create content, its account info is
     * queued and a Page-wall album row is collected. Calls
     * {@link #requestPageAlbums(Queue, List)} when completed; on error the hosting
     * Activity is flagged and finished.
     */
    private void requestAccounts() {
        Callback callback = new Callback() {
            @Override
            public void onCompleted(Response response) {
                FacebookSettingsActivity activity = (FacebookSettingsActivity) getActivity();
                if (activity == null || activity.isFinishing()) {
                    // Fragment is detached or the Activity is going away; nothing to do.
                    return;
                }
                if (response != null && response.getError() == null) {
                    Queue<PageAccount> pageAccounts = new LinkedBlockingQueue<>();
                    List<Object[]> pageAlbums = new ArrayList<>();
                    GraphObject graphObject = response.getGraphObject();
                    if (graphObject != null) {
                        JSONObject jsonObject = graphObject.getInnerJSONObject();
                        try {
                            JSONArray jsonArray = jsonObject.getJSONArray(FacebookEndpoint.ACCOUNTS_LISTING_RESULT_DATA_KEY);
                            // Synthetic cursor id for the generated album rows, starting at 1.
                            long cursorId = 1L;
                            for (int i = 0; i < jsonArray.length(); i++) {
                                try {
                                    // Get data from json.
                                    JSONObject account = jsonArray.getJSONObject(i);
                                    String id = account.getString(FacebookEndpoint.ACCOUNTS_LISTING_FIELD_ID);
                                    String name = account.getString(FacebookEndpoint.ACCOUNTS_LISTING_FIELD_NAME);
                                    String pageAccessToken = account.getString(FacebookEndpoint.ACCOUNTS_LISTING_FIELD_ACCESS_TOKEN);
                                    JSONArray perms = account.getJSONArray(FacebookEndpoint.ACCOUNTS_LISTING_FIELD_PERMS);
                                    // Add Page albums with content creation permission.
                                    for (int j = 0; j < perms.length(); j++) {
                                        if (FacebookEndpoint.ACCOUNT_PERM_CREATE_CONTENT.equals(perms.optString(j))) {
                                            if (id != null && id.length() > 0
                                                    && name != null && name.length() > 0
                                                    && pageAccessToken != null && pageAccessToken.length() > 0) {
                                                // Add Page account to queue.
                                                pageAccounts.add(new PageAccount(id, name, pageAccessToken));
                                                // Add Page wall album to Page albums.
                                                String graphPath = id + FacebookEndpoint.TO_PAGE_WALL_GRAPH_PATH;
                                                pageAlbums.add(new Object[] { cursorId, FacebookEndpoint.DestinationId.PAGE, name, graphPath, FacebookEndpoint.PAGE_PRIVACY, pageAccessToken });
                                                cursorId++;
                                            }
                                            // Permission found; no need to scan the remaining perms.
                                            break;
                                        }
                                    }
                                } catch (JSONException e) {
                                    // Do nothing. Best effort: skip malformed account entries, keep the rest.
                                }
                            }
                        } catch (JSONException e) {
                            // Do nothing. A missing/invalid data array simply yields empty results.
                        }
                    }
                    // Request for Profile albums.
                    requestPageAlbums(pageAccounts, pageAlbums);
                } else {
                    // Finish Activity with error.
                    activity.mHasErrorOccurred = true;
                    activity.tryFinish();
                }
            }
        };
        mFacebookEndpoint.requestAccounts(callback);
    }
}
public class StoreFactoryImpl { /** * < ! - - begin - user - doc - - > * < ! - - end - user - doc - - > * @ generated */ public String convertSmtpProtocolToString ( EDataType eDataType , Object instanceValue ) { } }
return instanceValue == null ? null : instanceValue . toString ( ) ;
public class ReflectedHeap { /** * { @ inheritDoc } */ @ Override public Handle < K , V > insert ( K key , V value ) { } }
if ( key == null ) { throw new NullPointerException ( "Null keys not permitted" ) ; } else if ( other != this ) { throw new IllegalStateException ( "A heap cannot be used after a meld" ) ; } else if ( size % 2 == 0 ) { free = new ReflectedHandle < K , V > ( this , key , value ) ; size ++ ; return free ; } else { ReflectedHandle < K , V > newHandle = new ReflectedHandle < K , V > ( this , key , value ) ; insertPair ( newHandle , free ) ; free = null ; size ++ ; return newHandle ; }
public class FSNamesystem {
    /**
     * Modify (block --> datanode) map: removes the mapping of the given block to
     * the given node and, if the removed replica leaves a still-valid block
     * under-replicated, queues it for re-replication.
     *
     * @param block the block whose replica was removed
     * @param node  the datanode the replica was removed from
     */
    private void removeStoredBlock(Block block, DatanodeDescriptor node) {
        if (NameNode.stateChangeLog.isDebugEnabled()) {
            NameNode.stateChangeLog.debug("BLOCK* NameSystem.removeStoredBlock: " + block + " from " + node.getName());
        }
        if (!blocksMap.removeNode(block, node)) {
            // nothing to do: the mapping was already gone
            if (NameNode.stateChangeLog.isDebugEnabled()) {
                NameNode.stateChangeLog.debug("BLOCK* NameSystem.removeStoredBlock: " + block + " has already been removed from node " + node);
            }
            return;
        }
        // if file is being actively written to and it is the last block,
        // then do not check replication-factor here.
        BlockInfo storedBlock = blocksMap.getStoredBlock(block);
        INodeFile fileINode = (storedBlock == null) ? null : storedBlock.getINode();
        if (fileINode != null && fileINode.isUnderConstruction() && fileINode.isLastBlock(storedBlock)) {
            decrementSafeBlockCount(block);
            return;
        }
        // It's possible that the block was removed because of a datanode
        // failure. If the block is still valid, check if replication is
        // necessary. In that case, put block on a possibly-will-
        // be-replicated list.
        if (fileINode != null) {
            decrementSafeBlockCount(block);
            // handle under replication
            // Use storedBlock here because block maybe a deleted block with size DELETED
            if (isPopulatingReplQueuesInternal()) {
                NumberReplicas num = countNodes(storedBlock);
                int numCurrentReplicas = num.liveReplicas() + pendingReplications.getNumReplicas(storedBlock);
                updateNeededReplicationQueue(storedBlock, -1, numCurrentReplicas, num.decommissionedReplicas, node, fileINode.getBlockReplication(storedBlock));
            }
        }
        // We've removed a block from a node, so it's definitely no longer
        // in "excess" there.
        removeFromExcessReplicateMap(block, node);
        // Remove the replica from corruptReplicas
        corruptReplicas.removeFromCorruptReplicasMap(block, node);
    }
}
public class Ascii { /** * Returns if the character sequence { @ code seq } ends with the character sequence { @ code suffix } * ignoring the case of any ASCII alphabetic characters * between { @ code ' a ' } and { @ code ' z ' } or { @ code ' A ' } and { @ code ' Z ' } inclusive . * @ since NEXT */ public static boolean endsWithIgnoreCase ( CharSequence seq , CharSequence suffix ) { } }
return startsWithIgnoreCase ( seq , suffix , seq . length ( ) - suffix . length ( ) ) ;
public class AbstractControllerConfiguration { /** * Creates a resource part of the path unified for all routes defined in the inherited class * @ param path resource path of all defined class * @ throws NullPointerException whether { @ code path } is { @ code null } */ protected final void setControllerPath ( String path ) { } }
requireNonNull ( path , "Global path cannot be change to 'null'" ) ; if ( ! "" . equals ( path ) && ! "/" . equals ( path ) ) { this . controllerPath = pathCorrector . apply ( path ) ; }
public class responderpolicy_lbvserver_binding { /** * Use this API to fetch responderpolicy _ lbvserver _ binding resources of given name . */ public static responderpolicy_lbvserver_binding [ ] get ( nitro_service service , String name ) throws Exception { } }
responderpolicy_lbvserver_binding obj = new responderpolicy_lbvserver_binding ( ) ; obj . set_name ( name ) ; responderpolicy_lbvserver_binding response [ ] = ( responderpolicy_lbvserver_binding [ ] ) obj . get_resources ( service ) ; return response ;
public class ExtensionScript { /** * Gets the interface { @ code class1 } from the given { @ code script } . Might return { @ code null } if the { @ code script } does not * implement the interface . * First tries to get the interface directly from the { @ code script } by calling the method * { @ code ScriptWrapper . getInterface ( Class ) } , if it returns { @ code null } the interface will be extracted from the script * after invoking it , using the method { @ code Invocable . getInterface ( Class ) } . * The context class loader of caller thread is replaced with the class loader { @ code AddOnLoader } to allow the script to * access classes of add - ons . If this behaviour is not desired call the method { @ code getInterfaceWithOutAddOnLoader ( } * instead . * @ param script the script that will be invoked * @ param class1 the interface that will be obtained from the script * @ return the interface implemented by the script , or { @ code null } if the { @ code script } does not implement the interface . * @ throws ScriptException if the engine of the given { @ code script } was not found . * @ throws IOException if an error occurred while obtaining the interface directly from the script ( * { @ code ScriptWrapper . getInterface ( Class ) } ) * @ see # getInterfaceWithOutAddOnLoader ( ScriptWrapper , Class ) * @ see ScriptWrapper # getInterface ( Class ) * @ see Invocable # getInterface ( Class ) */ public < T > T getInterface ( ScriptWrapper script , Class < T > class1 ) throws ScriptException , IOException { } }
ClassLoader previousContextClassLoader = Thread . currentThread ( ) . getContextClassLoader ( ) ; Thread . currentThread ( ) . setContextClassLoader ( ExtensionFactory . getAddOnLoader ( ) ) ; try { T iface = script . getInterface ( class1 ) ; if ( iface != null ) { // the script wrapper has overriden the usual scripting mechanism return iface ; } } finally { Thread . currentThread ( ) . setContextClassLoader ( previousContextClassLoader ) ; } if ( script . isRunableStandalone ( ) ) { return null ; } Invocable invocable = invokeScript ( script ) ; if ( invocable != null ) { return invocable . getInterface ( class1 ) ; } return null ;
public class CmsJspActionElement { /** * Returns an initialized { @ link CmsJspNavBuilder } instance . < p > * @ return an initialized navigation builder instance * @ see org . opencms . jsp . CmsJspNavBuilder */ public CmsJspNavBuilder getNavigation ( ) { } }
if ( isNotInitialized ( ) ) { return null ; } if ( m_vfsNav == null ) { m_vfsNav = new CmsJspNavBuilder ( getCmsObject ( ) ) ; } return m_vfsNav ;
public class MapListenerAdaptors { /** * Creates a { @ link com . hazelcast . map . impl . ListenerAdapter } array * for all event types of { @ link com . hazelcast . core . EntryEventType } . * @ param mapListener a { @ link com . hazelcast . map . listener . MapListener } instance . * @ return an array of { @ link com . hazelcast . map . impl . ListenerAdapter } */ public static ListenerAdapter [ ] createListenerAdapters ( MapListener mapListener ) { } }
EntryEventType [ ] values = EntryEventType . values ( ) ; ListenerAdapter [ ] listenerAdapters = new ListenerAdapter [ values . length ] ; for ( EntryEventType eventType : values ) { listenerAdapters [ eventType . ordinal ( ) ] = createListenerAdapter ( eventType , mapListener ) ; } return listenerAdapters ;
public class ControllerRegistrar { /** * Register all methods in the specified controller classes . * @ param controllers */ public final void init ( Class < ? extends Controller > ... controllers ) { } }
List < Class < ? > > classes = Arrays . asList ( controllers ) ; init ( classes ) ;
public class BootstrapContextCoordinator { /** * Set the default bootstrap context * @ param bc The bootstrap context */ public void setDefaultBootstrapContext ( CloneableBootstrapContext bc ) { } }
if ( trace ) log . tracef ( "Default BootstrapContext: %s" , bc ) ; String currentName = null ; if ( defaultBootstrapContext != null ) currentName = defaultBootstrapContext . getName ( ) ; defaultBootstrapContext = bc ; if ( bc != null ) { bootstrapContexts . put ( bc . getName ( ) , bc ) ; } else if ( currentName != null ) { bootstrapContexts . remove ( currentName ) ; }
public class JavaBean { /** * Create new instance of the object * @ param aValues the map value * @ param aClass the class to create * @ param < T > the type class * @ param convert the conversion implementation * @ return the create object instance * @ throws InstantiationException * @ throws IllegalAccessException */ public static < T > T newBean ( Map < ? , ? > aValues , Class < ? > aClass , PropertyConverter < Object , Object > convert ) throws InstantiationException , IllegalAccessException { } }
T obj = ClassPath . newInstance ( aClass ) ; populate ( aValues , obj , convert ) ; return obj ;
public class JobGraphGenerator {
    /**
     * Implements the pre-visiting during a depth-first traversal: creates the job vertex
     * for the current plan node and sets its local strategy. For iteration-related nodes
     * no vertex is created yet; instead, iteration bookkeeping is recorded and the graph
     * for the step function is built in the post visit.
     *
     * @param node the node that is currently processed
     * @return true if the visitor should descend to the node's children, false if not
     * @see org.apache.flink.util.Visitor#preVisit(org.apache.flink.util.Visitable)
     */
    @Override
    public boolean preVisit(PlanNode node) {
        // check if we have visited this node before. in non-tree graphs, this happens
        if (this.vertices.containsKey(node) || this.chainedTasks.containsKey(node) || this.iterations.containsKey(node)) {
            // return false to prevent further descend
            return false;
        }
        // the vertex to be created for the current node
        final JobVertex vertex;
        try {
            if (node instanceof SinkPlanNode) {
                vertex = createDataSinkVertex((SinkPlanNode) node);
            } else if (node instanceof SourcePlanNode) {
                vertex = createDataSourceVertex((SourcePlanNode) node);
            } else if (node instanceof BulkIterationPlanNode) {
                BulkIterationPlanNode iterationNode = (BulkIterationPlanNode) node;
                // for the bulk iteration, we skip creating anything for now. we create the graph
                // for the step function in the post visit.

                // check that the root of the step function has the same parallelism as the iteration.
                // because the tail must have the same parallelism as the head, we can only merge the last
                // operator with the tail, if they have the same parallelism. not merging is currently not
                // implemented
                PlanNode root = iterationNode.getRootOfStepFunction();
                if (root.getParallelism() != node.getParallelism()) {
                    throw new CompilerException("Error: The final operator of the step " + "function has a different parallelism than the iteration operator itself.");
                }
                IterationDescriptor descr = new IterationDescriptor(iterationNode, this.iterationIdEnumerator++);
                this.iterations.put(iterationNode, descr);
                vertex = null;
            } else if (node instanceof WorksetIterationPlanNode) {
                WorksetIterationPlanNode iterationNode = (WorksetIterationPlanNode) node;
                // we have the same constraints as for the bulk iteration
                PlanNode nextWorkSet = iterationNode.getNextWorkSetPlanNode();
                PlanNode solutionSetDelta = iterationNode.getSolutionSetDeltaPlanNode();
                if (nextWorkSet.getParallelism() != node.getParallelism()) {
                    throw new CompilerException("It is currently not supported that the final operator of the step " + "function has a different parallelism than the iteration operator itself.");
                }
                if (solutionSetDelta.getParallelism() != node.getParallelism()) {
                    throw new CompilerException("It is currently not supported that the final operator of the step " + "function has a different parallelism than the iteration operator itself.");
                }
                IterationDescriptor descr = new IterationDescriptor(iterationNode, this.iterationIdEnumerator++);
                this.iterations.put(iterationNode, descr);
                vertex = null;
            } else if (node instanceof SingleInputPlanNode) {
                vertex = createSingleInputVertex((SingleInputPlanNode) node);
            } else if (node instanceof DualInputPlanNode) {
                vertex = createDualInputVertex((DualInputPlanNode) node);
            } else if (node instanceof NAryUnionPlanNode) {
                // skip the union for now
                vertex = null;
            } else if (node instanceof BulkPartialSolutionPlanNode) {
                // create a head node (or not, if it is merged into its successor)
                vertex = createBulkIterationHead((BulkPartialSolutionPlanNode) node);
            } else if (node instanceof SolutionSetPlanNode) {
                // this represents an access into the solution set index.
                // we do not create a vertex for the solution set here (we create the head at the workset place holder)

                // we adjust the joins / cogroups that go into the solution set here
                for (Channel c : node.getOutgoingChannels()) {
                    DualInputPlanNode target = (DualInputPlanNode) c.getTarget();
                    JobVertex accessingVertex = this.vertices.get(target);
                    TaskConfig conf = new TaskConfig(accessingVertex.getConfiguration());
                    int inputNum = c == target.getInput1() ? 0 : c == target.getInput2() ? 1 : -1;
                    // sanity checks
                    if (inputNum == -1) {
                        throw new CompilerException();
                    }
                    // adjust the driver so it resolves this input against the solution set index
                    if (conf.getDriver().equals(JoinDriver.class)) {
                        conf.setDriver(inputNum == 0 ? JoinWithSolutionSetFirstDriver.class : JoinWithSolutionSetSecondDriver.class);
                    } else if (conf.getDriver().equals(CoGroupDriver.class)) {
                        conf.setDriver(inputNum == 0 ? CoGroupWithSolutionSetFirstDriver.class : CoGroupWithSolutionSetSecondDriver.class);
                    } else {
                        throw new CompilerException("Found join with solution set using incompatible operator (only Join/CoGroup are valid).");
                    }
                }
                // make sure we do not visit this node again. for that, we add a 'already seen' entry into one of the sets
                this.chainedTasks.put(node, ALREADY_VISITED_PLACEHOLDER);
                vertex = null;
            } else if (node instanceof WorksetPlanNode) {
                // create the iteration head here
                vertex = createWorksetIterationHead((WorksetPlanNode) node);
            } else {
                throw new CompilerException("Unrecognized node type: " + node.getClass().getName());
            }
        } catch (Exception e) {
            throw new CompilerException("Error translating node '" + node + "': " + e.getMessage(), e);
        }
        // check if a vertex was created, or if it was chained or skipped
        if (vertex != null) {
            // set parallelism
            int pd = node.getParallelism();
            vertex.setParallelism(pd);
            vertex.setMaxParallelism(pd);
            vertex.setSlotSharingGroup(sharingGroup);
            // check whether this vertex is part of an iteration step function
            if (this.currentIteration != null) {
                // check that the task has the same parallelism as the iteration as such
                PlanNode iterationNode = (PlanNode) this.currentIteration;
                if (iterationNode.getParallelism() < pd) {
                    throw new CompilerException("Error: All functions that are part of an iteration must have the same, or a lower, parallelism than the iteration operator.");
                }
                // store the id of the iterations the step functions participate in
                IterationDescriptor descr = this.iterations.get(this.currentIteration);
                new TaskConfig(vertex.getConfiguration()).setIterationId(descr.getId());
            }
            // store in the map
            this.vertices.put(node, vertex);
        }
        // returning true causes deeper descend
        return true;
    }
}
public class RaftRPC { /** * Setup custom serialization and deserialization for POJO { @ link Command } subclasses . * See { @ code RaftAgent } for more on which { @ code Command } types are supported . * @ param mapper instance of { @ code ObjectMapper } with which the serialization / deserialization mapping is registered * @ param commandSerializer instance of { @ code CommandSerializer } that can serialize a POJO { @ code Command } instance into binary * @ param commandDeserializer instance of { @ code CommandDeserializer } that can deserialize binary into a POJO { @ code Command } instance * @ see io . libraft . agent . RaftAgent */ public static void setupCustomCommandSerializationAndDeserialization ( ObjectMapper mapper , CommandSerializer commandSerializer , CommandDeserializer commandDeserializer ) { } }
SimpleModule module = new SimpleModule ( "raftrpc-custom-command-module" , new Version ( 0 , 0 , 0 , "inline" , "io.libraft" , "raftrpc-command-module" ) ) ; module . addSerializer ( Command . class , new RaftRPCCommand . Serializer ( commandSerializer ) ) ; module . addDeserializer ( Command . class , new RaftRPCCommand . Deserializer ( commandDeserializer ) ) ; mapper . registerModule ( module ) ;
public class DeploymentMetadataParse { /** * Transform a < code > & lt ; plugins . . . / & gt ; < / code > structure . */ protected void parseProcessEnginePlugins ( Element element , List < ProcessEnginePluginXml > plugins ) { } }
for ( Element chidElement : element . elements ( ) ) { if ( PLUGIN . equals ( chidElement . getTagName ( ) ) ) { parseProcessEnginePlugin ( chidElement , plugins ) ; } }
public class MultiLayerNetwork { /** * This method uses provided OutputAdapter to return custom object built from INDArray * PLEASE NOTE : This method uses dedicated Workspace for output generation to avoid redundant allocations * @ param inputs Input arrays to the netwonk * @ param inputMasks Optional input mask arrays ( may be null ) * @ param labelMasks Optional label mask arrays ( may be null * @ param outputAdapter OutputAdapter < T > instance * @ param < T > T extends Object * @ return T instance produced by OutputAdapter */ public synchronized < T > T output ( @ NonNull INDArray inputs , INDArray inputMasks , INDArray labelMasks , @ NonNull OutputAdapter < T > outputAdapter ) { } }
try ( val ws = Nd4j . getWorkspaceManager ( ) . getAndActivateWorkspace ( WS_ALL_LAYERS_ACT_CONFIG , WS_OUTPUT_MEM ) ) { if ( outputAdapter instanceof ModelAdapter ) return ( ( ModelAdapter < T > ) outputAdapter ) . apply ( this , new INDArray [ ] { inputs } , new INDArray [ ] { inputMasks } , new INDArray [ ] { labelMasks } ) ; else return outputAdapter . apply ( output ( inputs , false , inputMasks , labelMasks , ws ) ) ; }
public class OverrideHelper { /** * Returns the resolved features that are defined in the given < code > context type < / code > and its supertypes . * Considers private methods of super types , too . * @ param contextType the context type . Has to be contained in a resource . * @ return the resolved features . */ public ResolvedFeatures getResolvedFeatures ( JvmTypeReference contextType ) { } }
ITypeReferenceOwner owner = new StandardTypeReferenceOwner ( services , contextType . eResource ( ) . getResourceSet ( ) ) ; return getResolvedFeatures ( owner . toLightweightTypeReference ( contextType ) ) ;
public class Buffer { /** * Reads a byte array from the buffer , looks for a 0 to end the array . * @ return the read array */ public byte [ ] readBytesNullEnd ( ) { } }
int initialPosition = position ; int cnt = 0 ; while ( remaining ( ) > 0 && ( buf [ position ++ ] != 0 ) ) { cnt ++ ; } final byte [ ] tmpArr = new byte [ cnt ] ; System . arraycopy ( buf , initialPosition , tmpArr , 0 , cnt ) ; return tmpArr ;
public class Mediawiki { /** * show the Version */ public static void showVersion ( ) { } }
System . err . println ( "Mediawiki-Japi Version: " + VERSION ) ; System . err . println ( ) ; System . err . println ( " github: https://github.com/WolfgangFahl/Mediawiki-Japi" ) ; System . err . println ( "" ) ;
public class ObjectFactory { /** * Create an instance of { @ link JAXBElement } { @ code < } { @ link InverseType } { @ code > } } */ @ XmlElementDecl ( namespace = "http://www.w3.org/1998/Math/MathML" , name = "inverse" ) public JAXBElement < InverseType > createInverse ( InverseType value ) { } }
return new JAXBElement < InverseType > ( _Inverse_QNAME , InverseType . class , null , value ) ;
public class JBBPTextWriter {

    /**
     * Print an integer value.
     *
     * @param value value to be printed
     * @return the context
     * @throws IOException it will be thrown for transport error
     */
    public JBBPTextWriter Int(final int value) throws IOException {
        ensureValueMode();
        // give registered extras the first chance to provide a custom textual form;
        // the first non-null conversion wins
        String convertedByExtras = null;
        for (final Extra e : this.extras) {
            convertedByExtras = e.doConvertIntToStr(this, value);
            if (convertedByExtras != null) {
                break;
            }
        }
        if (convertedByExtras == null) {
            final long valueToWrite;
            if (this.byteOrder == JBBPByteOrder.LITTLE_ENDIAN) {
                // swap the 4 bytes so the printed digits reflect little-endian layout
                valueToWrite = JBBPUtils.reverseByteOrder(value, 4);
            } else {
                valueToWrite = value;
            }
            // mask to 32 bits (treat as unsigned), render in the configured radix,
            // and left-pad with '0' up to the configured minimum width
            printValueString(JBBPUtils.ensureMinTextLength(
                    JBBPUtils.ulong2str(valueToWrite & 0xFFFFFFFFL, this.radix, CHAR_BUFFER),
                    this.maxCharsRadixForInt, '0', 0));
        } else {
            printValueString(convertedByExtras);
        }
        return this;
    }
}
public class URLParser {

    /**
     * Converts a relative URL to an absolute URL.
     *
     * @param baseURL  the base URL the relative reference is resolved against
     * @param relative the (possibly relative) URL reference to resolve
     * @return the resolved absolute URL
     * @throws IllegalArgumentException if the reference cannot be resolved against the base URL
     */
    public static URL toAbsolute(URL baseURL, String relative) {
        try {
            return new URL(baseURL, relative);
        } catch (MalformedURLException ex) {
            // IllegalArgumentException is a RuntimeException subtype, so callers catching the
            // previous raw RuntimeException still work; the message now carries context and
            // the original cause is preserved instead of being wrapped anonymously.
            throw new IllegalArgumentException(
                    "Cannot resolve '" + relative + "' against base URL '" + baseURL + "'", ex);
        }
    }
}
public class PercentileStatistics { /** * Gets percentile . * @ param percentile the percentile * @ return the percentile */ public synchronized Double getPercentile ( final double percentile ) { } }
if ( null == values ) return Double . NaN ; return values . parallelStream ( ) . flatMapToDouble ( x -> Arrays . stream ( x ) ) . sorted ( ) . skip ( ( int ) ( percentile * values . size ( ) ) ) . findFirst ( ) . orElse ( Double . NaN ) ;
public class CmsHtmlWidget {

    /**
     * Returns the WYSIWYG editor configuration as a JSON object.<p>
     *
     * @param widgetOptions the options for the wysiwyg widget
     * @param cms the OpenCms context
     * @param resource the edited resource
     * @param contentLocale the edited content locale
     * @return the configuration
     */
    public static JSONObject getJSONConfiguration(
            CmsHtmlWidgetOption widgetOptions, CmsObject cms, CmsResource resource, Locale contentLocale) {
        JSONObject result = new JSONObject();
        CmsEditorDisplayOptions options = OpenCms.getWorkplaceManager().getEditorDisplayOptions();
        Properties displayOptions = options.getDisplayOptions(cms);
        try {
            // gallery-related display options
            if (options.showElement("gallery.enhancedoptions", displayOptions)) {
                result.put("cmsGalleryEnhancedOptions", true);
            }
            if (options.showElement("gallery.usethickbox", displayOptions)) {
                result.put("cmsGalleryUseThickbox", true);
            }
            if (widgetOptions.isAllowScripts()) {
                result.put("allowscripts", Boolean.TRUE);
            }
            result.put("fullpage", widgetOptions.isFullPage());
            List<String> toolbarItems = widgetOptions.getButtonBarShownItems();
            result.put("toolbar_items", toolbarItems);
            Locale workplaceLocale = OpenCms.getWorkplaceManager().getWorkplaceLocale(cms);
            result.put("language", workplaceLocale.getLanguage());
            // the editor expects a bare number, so strip any "px" suffix from the height
            String editorHeight = widgetOptions.getEditorHeight();
            if (CmsStringUtil.isNotEmptyOrWhitespaceOnly(editorHeight)) {
                editorHeight = editorHeight.replaceAll("px", "");
                result.put("height", editorHeight);
            }
            // set CSS style sheet for current editor widget if configured
            boolean cssConfigured = false;
            String cssPath = "";
            if (widgetOptions.useCss()) {
                cssPath = widgetOptions.getCssPath();
                // set the CSS path to null (the created configuration String passed to JS will not include this path then)
                widgetOptions.setCssPath(null);
                cssConfigured = true;
            } else if (OpenCms.getWorkplaceManager().getEditorCssHandlers().size() > 0) {
                // otherwise ask the registered CSS handlers, first match wins
                Iterator<I_CmsEditorCssHandler> i = OpenCms.getWorkplaceManager().getEditorCssHandlers().iterator();
                try {
                    String editedResourceSitePath = resource == null ? null : cms.getSitePath(resource);
                    while (i.hasNext()) {
                        I_CmsEditorCssHandler handler = i.next();
                        if (handler.matches(cms, editedResourceSitePath)) {
                            cssPath = handler.getUriStyleSheet(cms, editedResourceSitePath);
                            if (CmsStringUtil.isNotEmptyOrWhitespaceOnly(cssPath)) {
                                cssConfigured = true;
                            }
                            break;
                        }
                    }
                } catch (Exception e) {
                    // ignore, CSS could not be set
                }
            }
            if (cssConfigured) {
                result.put("content_css", OpenCms.getLinkManager().substituteLink(cms, cssPath));
            }
            // optional style format definitions read from a VFS file
            if (widgetOptions.showStylesFormat()) {
                try {
                    CmsFile file = cms.readFile(widgetOptions.getStylesFormatPath());
                    String characterEncoding = OpenCms.getSystemInfo().getDefaultEncoding();
                    result.put("style_formats", new String(file.getContents(), characterEncoding));
                } catch (CmsException cmsException) {
                    LOG.error("Can not open file:" + widgetOptions.getStylesFormatPath(), cmsException);
                } catch (UnsupportedEncodingException ex) {
                    LOG.error(ex);
                }
            }
            if (widgetOptions.isImportCss()) {
                result.put("importCss", true);
            }
            // block formats for the format select box, unless that button is hidden
            String formatSelectOptions = widgetOptions.getFormatSelectOptions();
            if (!CmsStringUtil.isEmpty(formatSelectOptions)
                && !widgetOptions.isButtonHidden(CmsHtmlWidgetOption.OPTION_FORMATSELECT)) {
                result.put("block_formats", getTinyMceBlockFormats(formatSelectOptions));
            }
            // TinyMCE paste-as-text behavior, driven by the workplace editor parameter
            Boolean pasteText = Boolean.valueOf(
                OpenCms.getWorkplaceManager().getWorkplaceEditorManager().getEditorParameter(
                    cms, "tinymce", "paste_text"));
            JSONObject directOptions = new JSONObject();
            directOptions.put("paste_text_sticky_default", pasteText);
            directOptions.put("paste_text_sticky", pasteText);
            result.put("tinyMceOptions", directOptions);
            // if spell checking is enabled, add the spell handler URL
            if (OpenCmsSpellcheckHandler.isSpellcheckingEnabled()) {
                result.put("spellcheck_url", OpenCms.getLinkManager().substituteLinkForUnknownTarget(
                    cms, OpenCmsSpellcheckHandler.getSpellcheckHandlerPath()));
                result.put("spellcheck_language", contentLocale.getLanguage());
            }
        } catch (JSONException e) {
            LOG.error(e.getLocalizedMessage(), e);
        }
        return result;
    }
}
public class Ray3D { /** * Updates the ray orientation and position to the specified data . * @ param ray Ray3DFloat object defining the placement data of the ray . */ public void update ( final Ray3DFloat ray ) { } }
final Point3D origin = VecToPoint ( ray . getOrigin ( ) ) ; final Point3D direction = VecToPoint ( ray . getDirection ( ) ) ; final Point3D end = origin . add ( direction . normalize ( ) . multiply ( rayLength ) ) ; super . setStartEndPoints ( origin , end ) ;
public class MoreExecutors { /** * Creates a { @ link ScheduledExecutorService } whose { @ code submit } and { @ code * invokeAll } methods submit { @ link ListenableFutureTask } instances to the * given delegate executor . Those methods , as well as { @ code execute } and * { @ code invokeAny } , are implemented in terms of calls to { @ code * delegate . execute } . All other methods are forwarded unchanged to the * delegate . This implies that the returned { @ code * ListeningScheduledExecutorService } never calls the delegate ' s { @ code * submit } , { @ code invokeAll } , and { @ code invokeAny } methods , so any special * handling of tasks must be implemented in the delegate ' s { @ code execute } * method or by wrapping the returned { @ code * ListeningScheduledExecutorService } . * < p > If the delegate executor was already an instance of { @ code * ListeningScheduledExecutorService } , it is returned untouched , and the rest * of this documentation does not apply . * @ since 10.0 */ public static ListeningScheduledExecutorService listeningDecorator ( ScheduledExecutorService delegate ) { } }
return ( delegate instanceof ListeningScheduledExecutorService ) ? ( ListeningScheduledExecutorService ) delegate : new ScheduledListeningDecorator ( delegate ) ;
public class AccessSet {

    /**
     * Reads the links from this access set to the related
     * {@link org.efaps.admin.datamodel.Type}s and registers this access set on each of them.
     *
     * @throws CacheReloadException on error
     */
    private void readLinks2DMTypes() throws CacheReloadException {
        Connection con = null;
        try {
            final List<Long> values = new ArrayList<>();
            con = Context.getConnection();
            PreparedStatement stmt = null;
            try {
                // first collect all linked type ids; the types are resolved only after the
                // statement is closed, to keep the JDBC resources short-lived
                stmt = con.prepareStatement(AccessSet.SQL_SET2DMTYPE);
                stmt.setObject(1, getId());
                final ResultSet rs = stmt.executeQuery();
                while (rs.next()) {
                    values.add(rs.getLong(1));
                }
                rs.close();
            } finally {
                if (stmt != null) {
                    stmt.close();
                }
            }
            con.commit();
            for (final Long dataModelTypeId : values) {
                final Type dataModelType = Type.get(dataModelTypeId);
                if (dataModelType == null) {
                    // dangling link: the referenced type does not exist (log and continue)
                    AccessSet.LOG.error("could not found data model type with id "
                                    + "'" + dataModelTypeId + "'");
                } else {
                    AccessSet.LOG.debug("read link from AccessSet '{}' (id = {}, uuid = {}) to DataModelType '{}' (id = {} uuid = {})",
                                    getName(), getId(), getUUID(),
                                    dataModelType.getName(), dataModelType.getId(), dataModelType.getUUID());
                    // register the link on both sides
                    this.dataModelTypes.add(dataModelType.getId());
                    dataModelType.addAccessSet(this);
                }
            }
        } catch (final SQLException e) {
            throw new CacheReloadException("could not read roles", e);
        } catch (final EFapsException e) {
            throw new CacheReloadException("could not read roles", e);
        } finally {
            try {
                if (con != null && !con.isClosed()) {
                    con.close();
                }
            } catch (final SQLException e) {
                throw new CacheReloadException("Cannot read a type for an attribute.", e);
            }
        }
    }
}
public class GetDevicePoolCompatibilityResult { /** * Information about compatible devices . * < b > NOTE : < / b > This method appends the values to the existing list ( if any ) . Use * { @ link # setCompatibleDevices ( java . util . Collection ) } or { @ link # withCompatibleDevices ( java . util . Collection ) } if * you want to override the existing values . * @ param compatibleDevices * Information about compatible devices . * @ return Returns a reference to this object so that method calls can be chained together . */ public GetDevicePoolCompatibilityResult withCompatibleDevices ( DevicePoolCompatibilityResult ... compatibleDevices ) { } }
if ( this . compatibleDevices == null ) { setCompatibleDevices ( new java . util . ArrayList < DevicePoolCompatibilityResult > ( compatibleDevices . length ) ) ; } for ( DevicePoolCompatibilityResult ele : compatibleDevices ) { this . compatibleDevices . add ( ele ) ; } return this ;
public class ChannelUpdateHandler {

    /**
     * Handles a server text channel update: compares the incoming JSON against the cached
     * channel state and, for each changed attribute (topic, nsfw flag, slowmode delay),
     * updates the cache and dispatches the corresponding change event.
     *
     * @param jsonChannel The json channel data.
     */
    private void handleServerTextChannel(JsonNode jsonChannel) {
        long channelId = jsonChannel.get("id").asLong();
        // only proceed when the channel is known to the cache
        api.getTextChannelById(channelId).map(c -> ((ServerTextChannelImpl) c)).ifPresent(channel -> {
            // --- topic change ---
            String oldTopic = channel.getTopic();
            // an absent or JSON-null topic is normalized to the empty string
            String newTopic = jsonChannel.has("topic") && !jsonChannel.get("topic").isNull()
                    ? jsonChannel.get("topic").asText() : "";
            if (!oldTopic.equals(newTopic)) {
                channel.setTopic(newTopic);
                ServerTextChannelChangeTopicEvent event =
                        new ServerTextChannelChangeTopicEventImpl(channel, newTopic, oldTopic);
                api.getEventDispatcher().dispatchServerTextChannelChangeTopicEvent(
                        (DispatchQueueSelector) channel.getServer(), channel.getServer(), channel, event);
            }
            // --- nsfw flag change ---
            boolean oldNsfwFlag = channel.isNsfw();
            boolean newNsfwFlag = jsonChannel.get("nsfw").asBoolean();
            if (oldNsfwFlag != newNsfwFlag) {
                channel.setNsfwFlag(newNsfwFlag);
                ServerChannelChangeNsfwFlagEvent event =
                        new ServerChannelChangeNsfwFlagEventImpl(channel, newNsfwFlag, oldNsfwFlag);
                api.getEventDispatcher().dispatchServerChannelChangeNsfwFlagEvent(
                        (DispatchQueueSelector) channel.getServer(), null, channel.getServer(), channel, event);
            }
            // --- slowmode delay change ---
            int oldSlowmodeDelay = channel.getSlowmodeDelayInSeconds();
            // missing "rate_limit_per_user" defaults to 0 (slowmode off)
            int newSlowmodeDelay = jsonChannel.get("rate_limit_per_user").asInt(0);
            if (oldSlowmodeDelay != newSlowmodeDelay) {
                channel.setSlowmodeDelayInSeconds(newSlowmodeDelay);
                ServerTextChannelChangeSlowmodeEvent event =
                        new ServerTextChannelChangeSlowmodeEventImpl(channel, oldSlowmodeDelay, newSlowmodeDelay);
                api.getEventDispatcher().dispatchServerTextChannelChangeSlowmodeEvent(
                        (DispatchQueueSelector) channel.getServer(), channel.getServer(), channel, event);
            }
        });
    }
}
public class ServerCommsDiagnosticModule {

    /**
     * Dumps the particulars of an ME-to-ME client side conversation.
     *
     * @param is the incident stream to log information to.
     * @param conv the conversation we want to dump.
     */
    private void dumpMEtoMEConversation(IncidentStream is, Conversation conv) {
        if (TraceComponent.isAnyTracingEnabled() && tc.isEntryEnabled())
            SibTr.entry(this, tc, "dumpMEtoMEConversation", new Object[] {is, conv});
        // Get the conversation state and use it to find out what we can.
        final ConversationState convState = (ConversationState) conv.getAttachment();
        final MEConnection commsConnection = (MEConnection) convState.getCommsConnection();
        is.writeLine("  Connected using: ", commsConnection);
        // the local ME may legitimately be null, so render a placeholder in that case
        final JsMessagingEngine me = commsConnection.getMessagingEngine();
        final String meInfo = me == null ? "<null>" : me.getName() + " [" + me.getUuid() + "]";
        is.writeLine("  Local ME: ", meInfo);
        is.writeLine("  Target ME: ", commsConnection.getTargetInformation());
        // Introspect details of conversation state.
        is.introspectAndWriteLine("Introspection of the conversation state:", convState);
        if (TraceComponent.isAnyTracingEnabled() && tc.isEntryEnabled())
            SibTr.exit(this, tc, "dumpMEtoMEConversation");
    }
}
public class SessionHandle { /** * This method will close all peer connections associated with the torrent and tell the * tracker that we ' ve stopped participating in the swarm . This operation cannot fail . * When it completes , you will receive a torrent _ removed _ alert . * The optional second argument options can be used to delete all the files downloaded * by this torrent . To do so , pass in the value session : : delete _ files . The removal of * the torrent is asynchronous , there is no guarantee that adding the same torrent immediately * after it was removed will not throw a libtorrent _ exception exception . Once the torrent * is deleted , a torrent _ deleted _ alert is posted . * @ param th the handle */ public void removeTorrent ( TorrentHandle th , remove_flags_t options ) { } }
if ( th . isValid ( ) ) { s . remove_torrent ( th . swig ( ) , options ) ; }
public class XReadArgs { /** * Perform a blocking read and wait up to a { @ link Duration timeout } for a new stream message . * @ param timeout max time to wait . * @ return { @ code this } . */ public XReadArgs block ( Duration timeout ) { } }
LettuceAssert . notNull ( timeout , "Block timeout must not be null" ) ; return block ( timeout . toMillis ( ) ) ;
public class DesignatedHostSslVerifier {

    /**
     * Installs SSL trust and hostname verification overrides for the given host.
     * Idempotent: a host already set up is skipped.
     *
     * NOTE(review): this mutates JVM-wide defaults (default SSL socket factory and
     * default hostname verifier of HttpsURLConnection), so it affects every HTTPS
     * connection in the process, not just those to {@code host} — presumably the
     * wrapped BlindTrustManager/DesignatedHostnameVerifier delegate to the defaults
     * for other hosts; confirm against their implementations.
     *
     * @param host the host for which certificate/hostname verification is relaxed
     * @throws Exception if the SSL context or trust managers cannot be initialized
     */
    public static synchronized void setupSslVerification(String host) throws Exception {
        if (sslVerificationHosts == null) sslVerificationHosts = new ArrayList<String>();
        if (!sslVerificationHosts.contains(host)) {
            TrustManagerFactory tmf = TrustManagerFactory.getInstance(TrustManagerFactory.getDefaultAlgorithm());
            // initialize tmf with the default trust store
            tmf.init((KeyStore) null);
            // get the default trust manager
            X509TrustManager defaultTm = null;
            for (TrustManager tm : tmf.getTrustManagers()) {
                if (tm instanceof X509TrustManager) {
                    defaultTm = (X509TrustManager) tm;
                    break;
                }
            }
            // wrap the default trust manager so only 'host' gets special treatment
            TrustManager[] trustManager = new TrustManager[] {new BlindTrustManager(defaultTm, host)};
            SSLContext sc = SSLContext.getInstance("SSL");
            sc.init(null, trustManager, new java.security.SecureRandom());
            HttpsURLConnection.setDefaultSSLSocketFactory(sc.getSocketFactory());
            // chain the existing default hostname verifier behind the designated-host one
            HostnameVerifier defaultHv = HttpsURLConnection.getDefaultHostnameVerifier();
            HostnameVerifier hostnameVerifier = new DesignatedHostnameVerifier(defaultHv, host);
            HttpsURLConnection.setDefaultHostnameVerifier(hostnameVerifier);
            // remember the host so repeated calls are no-ops
            sslVerificationHosts.add(host);
        }
    }
}
public class UnifiedListenerManager { /** * Attach the { @ code listener } to this manager and enqueue the task if it isn ' t pending or * running . * @ param task the task will be enqueue if it isn ' t running . * @ param listener the listener will be attach to this manager . */ public synchronized void attachAndEnqueueIfNotRun ( @ NonNull DownloadTask task , @ NonNull DownloadListener listener ) { } }
attachListener ( task , listener ) ; if ( ! isTaskPendingOrRunning ( task ) ) { task . enqueue ( hostListener ) ; }
public class JsonUtils { /** * Writes the given JSON - LD Object out to the given Writer , using * indentation and new lines to improve readability . * @ param writer * The writer that is to receive the serialized JSON - LD object . * @ param jsonObject * The JSON - LD Object to serialize . * @ throws JsonGenerationException * If there is a JSON error during serialization . * @ throws IOException * If there is an IO error during serialization . */ public static void writePrettyPrint ( Writer writer , Object jsonObject ) throws JsonGenerationException , IOException { } }
final JsonGenerator jw = JSON_FACTORY . createGenerator ( writer ) ; jw . useDefaultPrettyPrinter ( ) ; jw . writeObject ( jsonObject ) ;
public class SenderWorker {

    /**
     * Receives a <code>ProtocolDataUnit</code> from the socket and appends it to the end of the
     * receiving queue of this connection.
     *
     * @return Queue with the resulting units
     * @throws IOException if an I/O error occurs.
     * @throws InternetSCSIException if any violation of the iSCSI-Standard emerges.
     * @throws DigestException if a mismatch of the digest exists.
     */
    public ProtocolDataUnit receiveFromWire() throws DigestException, InternetSCSIException, IOException {
        // build a PDU configured with the digests negotiated for this connection
        final ProtocolDataUnit protocolDataUnit = protocolDataUnitFactory.create(
                connection.getSetting(OperationalTextKey.HEADER_DIGEST),
                connection.getSetting(OperationalTextKey.DATA_DIGEST));
        try {
            protocolDataUnit.read(socketChannel);
        } catch (ClosedChannelException e) {
            // a closed channel mid-read is a protocol-level failure, not a plain I/O error
            throw new InternetSCSIException(e);
        }
        LOGGER.debug("Receiving this PDU: " + protocolDataUnit);
        // ask the connection state machine to validate the PDU; null means "correct"
        final Exception isCorrect = connection.getState().isCorrect(protocolDataUnit);
        if (isCorrect == null) {
            LOGGER.trace("Adding PDU to Receiving Queue.");
            final TargetMessageParser parser =
                    (TargetMessageParser) protocolDataUnit.getBasicHeaderSegment().getParser();
            final Session session = connection.getSession();
            // the PDU maxCmdSN is greater than the local maxCmdSN, so we
            // have to update the local one
            if (session.getMaximumCommandSequenceNumber().compareTo(parser.getMaximumCommandSequenceNumber()) < 0) {
                session.setMaximumCommandSequenceNumber(parser.getMaximumCommandSequenceNumber());
            }
            // the PDU expCmdSN is greater than the local expCmdSN, so we
            // have to update the local one
            if (parser.incrementSequenceNumber()) {
                if (connection.getExpectedStatusSequenceNumber().compareTo(parser.getStatusSequenceNumber()) >= 0) {
                    connection.incrementExpectedStatusSequenceNumber();
                } else {
                    // out-of-order status sequence number: log the mismatch but keep the PDU
                    LOGGER.error("Status Sequence Number Mismatch (received, expected): "
                            + parser.getStatusSequenceNumber() + ", "
                            + (connection.getExpectedStatusSequenceNumber().getValue() - 1));
                }
            }
        } else {
            // validation failed: surface the state machine's diagnosis as the cause
            throw new InternetSCSIException(isCorrect);
        }
        return protocolDataUnit;
    }
}
public class InternalCallContextFactory { /** * Create an internal tenant callcontext * @ param tenantRecordId tenant _ record _ id ( cannot be null ) * @ param accountRecordId account _ record _ id ( cannot be null for INSERT operations ) * @ return internal tenant callcontext */ public InternalTenantContext createInternalTenantContext ( final Long tenantRecordId , @ Nullable final Long accountRecordId ) { } }
populateMDCContext ( null , accountRecordId , tenantRecordId ) ; if ( accountRecordId == null ) { return new InternalTenantContext ( tenantRecordId ) ; } else { final ImmutableAccountData immutableAccountData = getImmutableAccountData ( accountRecordId , tenantRecordId ) ; final DateTimeZone fixedOffsetTimeZone = immutableAccountData . getFixedOffsetTimeZone ( ) ; final DateTime referenceTime = immutableAccountData . getReferenceTime ( ) ; return new InternalTenantContext ( tenantRecordId , accountRecordId , fixedOffsetTimeZone , referenceTime ) ; }
public class RaftServiceManager {

    /**
     * Applies a query entry to the state machine.
     *
     * Query entries are applied to the user {@link PrimitiveService} for read-only operations.
     * Because queries are read-only, they may only be applied on a single server in the cluster,
     * and query entries do not go through the Raft log. Clients provide a sequence and version
     * number for each query; if the lastApplied index of this state machine does not meet the
     * provided version number, the query waits for the state machine to catch up, so clients see
     * state progress monotonically even when switching servers. Queries cannot publish session
     * events, since events require commands written to the Raft log.
     *
     * @param entry the indexed query entry to apply
     * @return a future completed with the operation result, or exceptionally with
     *         {@code RaftException.UnknownSession} when the session is gone
     */
    private CompletableFuture<OperationResult> applyQuery(Indexed<QueryEntry> entry) {
        RaftSession session = raft.getSessions().getSession(entry.entry().session());
        // If the session is null then that indicates that the session already timed out or it never existed.
        // Return with an UnknownSessionException.
        if (session == null) {
            logger.warn("Unknown session: " + entry.entry().session());
            return Futures.exceptionalFuture(
                    new RaftException.UnknownSession("unknown session " + entry.entry().session()));
        }
        // Execute the query using the state machine associated with the session.
        return session.getService().executeQuery(
                entry.index(),
                entry.entry().sequenceNumber(),
                entry.entry().timestamp(),
                session,
                entry.entry().operation());
    }
}
public class ReflectCache { /** * 得到服务的自定义ClassLoader * @ param serviceUniqueName 服务唯一名称 * @ return 服务级别ClassLoader */ public static ClassLoader getServiceClassLoader ( String serviceUniqueName ) { } }
ClassLoader appClassLoader = SERVICE_CLASSLOADER_MAP . get ( serviceUniqueName ) ; if ( appClassLoader == null ) { return ClassLoaderUtils . getCurrentClassLoader ( ) ; } else { return appClassLoader ; }
public class PolicyTopicEvidence {

    /**
     * Sets the policyTopicEvidenceType value for this PolicyTopicEvidence.
     *
     * @param policyTopicEvidenceType the type of evidence for the policy topic
     */
    public void setPolicyTopicEvidenceType(
            com.google.api.ads.adwords.axis.v201809.cm.PolicyTopicEvidenceType policyTopicEvidenceType) {
        this.policyTopicEvidenceType = policyTopicEvidenceType;
    }
}
public class CodeGeneratorMain {

    /**
     * Main entry point: parses command-line options, initializes logging, and runs
     * the code generator over the configured input files.
     *
     * @param args command-line arguments, parsed into {@code CodeGeneratorConfig}
     * @throws Exception if argument parsing or file processing fails
     */
    public static void main(String[] args) throws Exception {
        final CodeGeneratorConfig config = new CodeGeneratorConfig();
        final JCommander commander = new JCommander(config);
        commander.parse(args);
        // --help: print usage and exit immediately
        if (config.help) {
            commander.usage();
            System.exit(0);
        }
        // make sure the logger is initialized
        CodeGeneratorLoggerFactory.setLogger(new CodeGeneratorSimpleLogger());
        // if baseFolder is null or empty, reset it to the default value
        config.baseFolder = StringUtils.defaultIfEmpty(config.baseFolder, "GUIData");
        // create the helper with our config
        CodeGeneratorHelper helper = new CodeGeneratorHelper(config);
        if (CodeGeneratorLoggerFactory.getLogger().isDebugEnabled()) {
            helper.displayProjectInformation();
        }
        // register all custom element classes
        helper.registerCustomElements();
        // process the files.
        helper.processFiles();
    }
}