signature
stringlengths
43
39.1k
implementation
stringlengths
0
450k
public class EdgeAccess {
    /**
     * Disconnects the specified edge from the linked list of edges of the specified node.
     * It does not release the freed space to be reused.
     *
     * @param edgeToRemove        id of the edge to unlink from {@code baseNode}'s edge list
     * @param edgeToUpdatePointer pointer to the edge preceding the removed edge in
     *                            {@code baseNode}'s list; if it is negative then the
     *                            nextEdgeId will be saved to refToEdges of nodes (i.e. the
     *                            removed edge was the first in the list)
     * @param baseNode            the node whose edge list is being modified
     * @return the pointer of the removed edge
     */
    final long internalEdgeDisconnect(int edgeToRemove, long edgeToUpdatePointer, int baseNode) {
        long edgeToRemovePointer = toPointer(edgeToRemove);
        // an edge is shared across the two nodes even if the edge is not in both directions
        // so we need to know two edge-pointers pointing to the edge before edgeToRemovePointer
        // pick the "next" link as seen from baseNode's side of the removed edge
        int nextEdgeId = getNodeA(edgeToRemovePointer) == baseNode ? getLinkA(edgeToRemovePointer) : getLinkB(edgeToRemovePointer);
        if (edgeToUpdatePointer < 0) {
            // removed edge was the head of baseNode's list: repoint the node reference itself
            setEdgeRef(baseNode, nextEdgeId);
        } else {
            // adjNode is different for the edge we want to update with the new link,
            // so choose link A or B depending on which end of that edge is baseNode
            long link = getNodeA(edgeToUpdatePointer) == baseNode ? edgeToUpdatePointer + E_LINKA : edgeToUpdatePointer + E_LINKB;
            edges.setInt(link, nextEdgeId);
        }
        return edgeToRemovePointer;
    }
}
public class ServerDnsAliasesInner {
    /**
     * Creates a server dns alias.
     *
     * Thin async wrapper: delegates to the ServiceResponse-based overload and adapts the
     * result to a {@link ServiceFuture} that also notifies the given callback.
     *
     * @param resourceGroupName The name of the resource group that contains the resource. You can obtain this value from the Azure Resource Manager API or the portal.
     * @param serverName The name of the server that the alias is pointing to.
     * @param dnsAliasName The name of the server DNS alias.
     * @param serviceCallback the async ServiceCallback to handle successful and failed responses.
     * @throws IllegalArgumentException thrown if parameters fail the validation
     * @return the {@link ServiceFuture} object
     */
    public ServiceFuture<ServerDnsAliasInner> beginCreateOrUpdateAsync(String resourceGroupName, String serverName, String dnsAliasName, final ServiceCallback<ServerDnsAliasInner> serviceCallback) {
        return ServiceFuture.fromResponse(beginCreateOrUpdateWithServiceResponseAsync(resourceGroupName, serverName, dnsAliasName), serviceCallback);
    }
}
public class cachepolicylabel { /** * Use this API to fetch cachepolicylabel resource of given name . */ public static cachepolicylabel get ( nitro_service service , String labelname ) throws Exception { } }
cachepolicylabel obj = new cachepolicylabel ( ) ; obj . set_labelname ( labelname ) ; cachepolicylabel response = ( cachepolicylabel ) obj . get_resource ( service ) ; return response ;
public class InfoValues { /** * Returns values . < br > * This method use class InfoLine . < br > * This method is not intended for external use . < br > * @ return */ public String getValues ( ) { } }
String info = "" ; for ( int i = 0 ; i < vsL . size ( ) ; i ++ ) { info += vsL . get ( i ) + "|" ; } return info ;
public class MockClassLoader { /** * Access an instance of our class loader . * @ return singleton */ public static MockClassLoader getInstance ( ) { } }
if ( MockClassLoader . instance == null ) { MockClassLoader . instance = new MockClassLoader ( ) ; } return MockClassLoader . instance ;
public class CommandLine { /** * Apply configuration to the container . * < p > Applies configuration from < code > Key . SERVER _ CONFIG < / code > and < code > Key . STAGE _ CONFIG < / code > . < / p > * @ param swarm Swarm instance to configure . * @ throws MalformedURLException If a URL is attempted to be read and fails . */ public void applyConfigurations ( Swarm swarm ) throws IOException { } }
if ( get ( SERVER_CONFIG ) != null ) { swarm . withXmlConfig ( get ( SERVER_CONFIG ) ) ; } if ( get ( CONFIG ) != null ) { List < URL > configs = get ( CONFIG ) ; for ( URL config : configs ) { swarm . withConfig ( config ) ; } } if ( get ( PROFILES ) != null ) { List < String > profiles = get ( PROFILES ) ; for ( String profile : profiles ) { swarm . withProfile ( profile ) ; } }
public class ClientEncoders { /** * Factory method for arguments writers . Current implementation choose the { @ link ArgumentsWriter } implementation based on * given arguments list . It is caller responsibility to ensure arguments order and types match remote method signature . * Heuristic to determine arguments writer : * < ul > * < li > uses { @ link XmlArgumentsWriter } if there is a single argument of type { @ link Document } , * < li > uses { @ link StreamArgumentsWriter } if there is a single argument of type { @ link StreamHandler } , * < li > uses { @ link MixedArgumentsWriter } if there are more documents and / or streams present , * < li > otherwise uses { @ link JsonArgumentsWriter } . * < / ul > * @ param arguments invocation arguments list , not null or empty . * @ return parameters encoder instance . * @ throws IllegalArgumentException if given arguments list is null or empty . */ public ArgumentsWriter getArgumentsWriter ( Object [ ] arguments ) { } }
Params . notNullOrEmpty ( arguments , "Arguments" ) ; int streams = 0 ; int documents = 0 ; for ( Object argument : arguments ) { if ( argument instanceof StreamHandler ) { ++ streams ; } if ( argument instanceof Document ) { ++ documents ; } } if ( arguments . length == 1 ) { if ( documents != 0 ) { return new XmlArgumentsWriter ( ) ; } if ( streams != 0 ) { return new StreamArgumentsWriter ( ) ; } } if ( documents != 0 ) { return new MixedArgumentsWriter ( ) ; } if ( streams != 0 ) { return new MixedArgumentsWriter ( ) ; } return new JsonArgumentsWriter ( ) ;
public class DataSourceProxyFactory {
    /**
     * Initializes this factory: starts tracking the target {@code DataSource} OSGi
     * service matching {@code dsName} and builds a dynamic proxy that implements
     * {@code DataSource}, using this factory as the {@link java.lang.reflect.InvocationHandler}.
     *
     * NOTE(review): the tracker is opened here but never closed in this method —
     * presumably a matching destroy/close exists elsewhere; verify lifecycle.
     *
     * @throws Exception if the filter string is invalid or tracking cannot start
     */
    public void init() throws Exception {
        // Build an OSGi service filter selecting the DataSource by its configured name.
        filter = bundleContext.createFilter(getFilter(dsName));
        tracker = new ServiceTracker<DataSource, DataSource>(bundleContext, filter, null);
        tracker.open();
        // The proxy delegates all DataSource calls to this factory (it acts as InvocationHandler).
        proxy = (DataSource) Proxy.newProxyInstance(DataSourceProxyFactory.class.getClassLoader(), new Class<?>[] { DataSource.class }, this);
    }
}
public class AWSOpsWorksClient {
    /**
     * Registers an Amazon EBS volume with a specified stack. A volume can be registered with only one stack at a time.
     * If the volume is already registered, you must first deregister it by calling <a>DeregisterVolume</a>. For more
     * information, see <a href="http://docs.aws.amazon.com/opsworks/latest/userguide/resources.html">Resource
     * Management</a>.
     *
     * <b>Required Permissions</b>: To use this action, an IAM user must have a Manage permissions level for the stack,
     * or an attached policy that explicitly grants permissions. For more information on user permissions, see <a
     * href="http://docs.aws.amazon.com/opsworks/latest/userguide/opsworks-security-users.html">Managing User
     * Permissions</a>.
     *
     * @param registerVolumeRequest
     * @return Result of the RegisterVolume operation returned by the service.
     * @throws ValidationException
     *         Indicates that a request was not valid.
     * @throws ResourceNotFoundException
     *         Indicates that a resource was not found.
     * @sample AWSOpsWorks.RegisterVolume
     * @see <a href="http://docs.aws.amazon.com/goto/WebAPI/opsworks-2013-02-18/RegisterVolume" target="_top">AWS API
     *      Documentation</a>
     */
    @Override
    public RegisterVolumeResult registerVolume(RegisterVolumeRequest request) {
        // Standard generated SDK pattern: run pre-execution hooks (may replace the
        // request object), then dispatch to the generated executor.
        request = beforeClientExecution(request);
        return executeRegisterVolume(request);
    }
}
public class KMedoidsPark {
    /**
     * Run k-medoids clustering: choose initial medoids, assign points to their
     * nearest medoid, then iteratively try to swap each medoid with a better
     * member of its own cluster until no swap improves the cost (convergence).
     *
     * @param database Database
     * @param relation relation to use
     * @return clustering result with one cluster per medoid
     */
    public Clustering<MedoidModel> run(Database database, Relation<V> relation) {
        DistanceQuery<V> distQ = DatabaseUtil.precomputedDistanceQuery(database, relation, getDistanceFunction(), LOG);
        // Choose initial medoids
        if (LOG.isStatistics()) {
            LOG.statistics(new StringStatistic(KEY + ".initialization", initializer.toString()));
        }
        ArrayModifiableDBIDs medoids = DBIDUtil.newArray(initializer.chooseInitialMedoids(k, relation.getDBIDs(), distQ));
        DBIDArrayMIter miter = medoids.iter();
        // mdists[i] holds the current cost contribution of cluster i's medoid.
        double[] mdists = new double[k];
        // Setup cluster assignment store
        List<ModifiableDBIDs> clusters = new ArrayList<>();
        for (int i = 0; i < k; i++) {
            HashSetModifiableDBIDs set = DBIDUtil.newHashSet(relation.size() / k);
            set.add(miter.seek(i)); // Add medoids.
            clusters.add(set);
        }
        // Initial assignment to nearest medoids
        // TODO: reuse this information, from the build phase, when possible?
        double tc = assignToNearestCluster(miter, mdists, clusters, distQ);
        if (LOG.isStatistics()) {
            LOG.statistics(new DoubleStatistic(KEY + ".iteration-" + 0 + ".cost", tc));
        }
        IndefiniteProgress prog = LOG.isVerbose() ? new IndefiniteProgress("K-Medoids EM iteration", LOG) : null;
        // Swap phase
        int iteration = 0;
        DBIDVar best = DBIDUtil.newVar();
        while (true) {
            boolean changed = false;
            // Try to swap the medoid with a better cluster member:
            int i = 0;
            for (miter.seek(0); miter.valid(); miter.advance(), i++) {
                best.unset();
                double bestm = mdists[i];
                // Evaluate every non-medoid member as a replacement candidate.
                for (DBIDIter iter = clusters.get(i).iter(); iter.valid(); iter.advance()) {
                    if (DBIDUtil.equal(miter, iter)) {
                        continue;
                    }
                    // Cost of the candidate = sum of distances to all other members.
                    double sum = 0;
                    for (DBIDIter iter2 = clusters.get(i).iter(); iter2.valid(); iter2.advance()) {
                        if (DBIDUtil.equal(iter, iter2)) {
                            continue;
                        }
                        sum += distQ.distance(iter, iter2);
                    }
                    if (sum < bestm) {
                        best.set(iter);
                        bestm = sum;
                    }
                }
                // Adopt the best candidate if it differs from the current medoid.
                if (best.isSet() && !DBIDUtil.equal(miter, best)) {
                    changed = true;
                    assert (clusters.get(i).contains(best));
                    medoids.set(i, best);
                    mdists[i] = bestm;
                }
            }
            if (!changed) {
                break; // Converged
            }
            // Reassign
            double nc = assignToNearestCluster(miter, mdists, clusters, distQ);
            ++iteration;
            if (LOG.isStatistics()) {
                LOG.statistics(new DoubleStatistic(KEY + ".iteration-" + iteration + ".cost", nc));
            }
            LOG.incrementProcessed(prog);
        }
        LOG.setCompleted(prog);
        if (LOG.isStatistics()) {
            LOG.statistics(new LongStatistic(KEY + ".iterations", iteration));
        }
        // Wrap result
        Clustering<MedoidModel> result = new Clustering<>("k-Medoids Clustering", "kmedoids-clustering");
        for (DBIDArrayIter it = medoids.iter(); it.valid(); it.advance()) {
            result.addToplevelCluster(new Cluster<>(clusters.get(it.getOffset()), new MedoidModel(DBIDUtil.deref(it))));
        }
        return result;
    }
}
public class ListDialogDecorator { /** * Attaches the adapter to the dialog ' s list view . */ private void attachAdapter ( ) { } }
if ( listView != null ) { if ( adapter != null ) { listView . setHasFixedSize ( false ) ; listView . setLayoutManager ( layoutManager ) ; listView . setAdapter ( adapter ) ; listView . setVisibility ( adapter != null ? View . VISIBLE : View . GONE ) ; adapter . setOnItemSelectedListener ( listViewItemSelectedListener ) ; initializeSelectionListener ( ) ; initializeCheckedItems ( ) ; adaptItemColor ( ) ; adaptItemTypeface ( ) ; } else { if ( inflatedCustomView ) { getDialog ( ) . setView ( null ) ; } listView = null ; } }
public class MultiIndex {
    /**
     * Optimizes every persistent index by force-merging it down to a single segment
     * via {@link IndexWriter#forceMerge(int, boolean)} (the successor of the
     * deprecated {@code IndexWriter#optimize()}), blocking until each merge completes.
     *
     * @throws CorruptIndexException if an index is corrupt
     * @throws IOException           if a low-level I/O error occurs
     */
    public void optimize() throws CorruptIndexException, IOException {
        for (PersistentIndex index : indexes) {
            IndexWriter writer = index.getIndexWriter();
            // Merge down to 1 segment; 'true' waits for the merge to finish.
            writer.forceMerge(1, true);
        }
    }
}
public class JdbcCpoAdapter { /** * DOCUMENT ME ! * @ return DOCUMENT ME ! * @ throws CpoException DOCUMENT ME ! */ protected Connection getWriteConnection ( ) throws CpoException { } }
Connection connection ; try { connection = getWriteDataSource ( ) . getConnection ( ) ; connection . setAutoCommit ( false ) ; } catch ( SQLException e ) { String msg = "getWriteConnection(): failed" ; logger . error ( msg , e ) ; throw new CpoException ( msg , e ) ; } return connection ;
public class LCharPredicateBuilder { /** * One of ways of creating builder . This is possibly the least verbose way where compiler should be able to guess the generic parameters . */ @ Nonnull public static LCharPredicate charPredicateFrom ( Consumer < LCharPredicateBuilder > buildingFunction ) { } }
LCharPredicateBuilder builder = new LCharPredicateBuilder ( ) ; buildingFunction . accept ( builder ) ; return builder . build ( ) ;
public class CreateAppProfileRequest {
    /**
     * Creates the request protobuf. This method is considered an internal implementation detail and
     * not meant to be used by applications.
     *
     * @param projectId the GCP project id used to build the fully-qualified instance name
     * @return the protobuf request with its parent field set to the formatted instance name
     */
    @InternalApi
    public com.google.bigtable.admin.v2.CreateAppProfileRequest toProto(String projectId) {
        // Resolve "projects/{projectId}/instances/{instanceId}" and attach it as parent.
        String name = NameUtil.formatInstanceName(projectId, instanceId);
        return proto.setParent(name).build();
    }
}
public class AbstractCommandLineRunner { /** * Query the flag for the output charset . * < p > Let the outputCharset be the same as the input charset . . . except if we ' re reading in UTF - 8 * by default . By tradition , we ' ve always output ASCII to avoid various hiccups with different * browsers , proxies and firewalls . * @ return Name of the charset to use when writing outputs . Guaranteed to be a supported charset . * @ throws FlagUsageException if flag is not a valid Charset name . */ @ GwtIncompatible ( "Unnecessary" ) private Charset getLegacyOutputCharset ( ) { } }
if ( ! config . charset . isEmpty ( ) ) { if ( ! Charset . isSupported ( config . charset ) ) { throw new FlagUsageException ( config . charset + " is not a valid charset name." ) ; } return Charset . forName ( config . charset ) ; } return US_ASCII ;
public class BaseMessageHeader { /** * Get the message header data as a XML String . * @ return */ public StringBuffer addXML ( StringBuffer sbXML ) { } }
Util . addStartTag ( sbXML , BaseMessage . HEADER_TAG ) . append ( Constant . RETURN ) ; for ( String strKey : this . getProperties ( ) . keySet ( ) ) { Object data = this . get ( strKey ) ; Util . addStartTag ( sbXML , strKey ) ; sbXML . append ( data ) ; Util . addEndTag ( sbXML , strKey ) . append ( Constant . RETURN ) ; } Util . addEndTag ( sbXML , BaseMessage . HEADER_TAG ) . append ( Constant . RETURN ) ; return sbXML ;
public class AbstractHttp2ConnectionHandlerBuilder {
    /**
     * Sets the {@link Http2Connection} to use.
     *
     * Mutually exclusive with the maxReservedStreams/server/codec options: each
     * enforceConstraint call throws if the named option was already configured,
     * because an explicit connection supersedes them.
     *
     * @param connection the connection instance, must not be null
     * @return this builder, for chaining
     */
    protected B connection(Http2Connection connection) {
        enforceConstraint("connection", "maxReservedStreams", maxReservedStreams);
        enforceConstraint("connection", "server", isServer);
        enforceConstraint("connection", "codec", decoder);
        enforceConstraint("connection", "codec", encoder);
        this.connection = checkNotNull(connection, "connection");
        return self();
    }
}
public class AddExpandedTextAdWithUpgradedUrls {
    /**
     * Runs the example: builds an expanded text ad with a tracking URL template,
     * custom URL parameters and final (desktop + mobile) URLs, then adds it to the
     * given ad group in PAUSED state and prints the result.
     *
     * @param adWordsServices the services factory.
     * @param session the session.
     * @param adGroupId the ID of the ad group where the ad will be created.
     * @throws ApiException if the API request failed with one or more service errors.
     * @throws RemoteException if the API request failed due to other errors.
     */
    public static void runExample(AdWordsServicesInterface adWordsServices, AdWordsSession session, long adGroupId) throws RemoteException {
        // Get the AdGroupAdService.
        AdGroupAdServiceInterface adGroupAdService = adWordsServices.get(session, AdGroupAdServiceInterface.class);
        // Create expanded text ad with a tracking template and custom parameters.
        ExpandedTextAd expandedTextAd = new ExpandedTextAd();
        expandedTextAd.setHeadlinePart1("Luxury Cruise to Mars");
        expandedTextAd.setHeadlinePart2("Visit the Red Planet in style.");
        expandedTextAd.setDescription("Low-gravity fun for everyone!");
        // Specify a tracking url for 3rd party tracking provider. You may
        // specify one at customer, campaign, ad group, ad, criterion or
        // feed item levels.
        expandedTextAd.setTrackingUrlTemplate("http://tracker.example.com/?season={_season}&promocode={_promocode}&u={lpurl}");
        // Since your tracking url has two custom parameters, provide their
        // values too. This can be provided at campaign, ad group, ad, criterion
        // or feed item levels.
        CustomParameter seasonParameter = new CustomParameter();
        seasonParameter.setKey("season");
        seasonParameter.setValue("christmas");
        CustomParameter promoCodeParameter = new CustomParameter();
        promoCodeParameter.setKey("promocode");
        promoCodeParameter.setValue("NYC123");
        CustomParameters trackingUrlParameters = new CustomParameters();
        trackingUrlParameters.setParameters(new CustomParameter[] { seasonParameter, promoCodeParameter });
        expandedTextAd.setUrlCustomParameters(trackingUrlParameters);
        // Specify a list of final urls. This field cannot be set if url field is
        // set. This may be specified at ad, criterion, and feed item levels.
        expandedTextAd.setFinalUrls(new String[] { "http://www.example.com/cruise/space/", "http://www.example.com/locations/mars/" });
        // Specify a list of final mobile urls. This field cannot be set if url field is
        // set or finalUrls is not set. This may be specified at ad, criterion, and feed
        // item levels.
        expandedTextAd.setFinalMobileUrls(new String[] { "http://mobile.example.com/cruise/space/", "http://mobile.example.com/locations/mars/" });
        // Create ad group ad.
        AdGroupAd textAdGroupAd = new AdGroupAd();
        textAdGroupAd.setAdGroupId(adGroupId);
        textAdGroupAd.setAd(expandedTextAd);
        // Optional: Set status.
        textAdGroupAd.setStatus(AdGroupAdStatus.PAUSED);
        // Create operation.
        AdGroupAdOperation textAdGroupAdOperation = new AdGroupAdOperation();
        textAdGroupAdOperation.setOperand(textAdGroupAd);
        textAdGroupAdOperation.setOperator(Operator.ADD);
        AdGroupAdOperation[] operations = new AdGroupAdOperation[] { textAdGroupAdOperation };
        // Add ad.
        AdGroupAd adGroupAdResult = adGroupAdService.mutate(operations).getValue(0);
        // Display ad.
        System.out.printf("Ad with ID %d and tracking URL template '%s' was added.", adGroupAdResult.getAd().getId(), adGroupAdResult.getAd().getTrackingUrlTemplate());
    }
}
public class DefaultGroovyMethods {
    /**
     * Support the subscript operator with an ObjectRange for a double array.
     * Delegates to the shared primitive-array range accessor.
     *
     * @param array a double array
     * @param range an ObjectRange indicating the indices for the items to retrieve
     * @return list of the retrieved doubles
     * @since 1.0
     */
    @SuppressWarnings("unchecked")
    public static List<Double> getAt(double[] array, ObjectRange range) {
        return primitiveArrayGet(array, range);
    }
}
public class IfcPhysicalComplexQuantityImpl {
    /**
     * Returns the HasQuantities feature list, resolving proxies (second argument
     * {@code true}) via the EMF reflective accessor.
     * <!-- begin-user-doc -->
     * <!-- end-user-doc -->
     * @generated
     */
    @SuppressWarnings("unchecked")
    public EList<IfcPhysicalQuantity> getHasQuantities() {
        return (EList<IfcPhysicalQuantity>) eGet(Ifc2x3tc1Package.Literals.IFC_PHYSICAL_COMPLEX_QUANTITY__HAS_QUANTITIES, true);
    }
}
public class MessageFormatMessageFactory {
    /**
     * Creates {@link org.apache.logging.log4j.message.MessageFormatMessage} instances.
     * (The previous javadoc incorrectly said StringFormattedMessage.)
     *
     * @param message The message pattern.
     * @param params Parameters to the message.
     * @return The Message.
     * @see org.apache.logging.log4j.message.MessageFactory#newMessage(String, Object...)
     */
    @Override
    public Message newMessage(final String message, final Object... params) {
        return new MessageFormatMessage(message, params);
    }
}
public class DifferentialFunctionFactory {
    /**
     * Avg pooling 3d operation. Forces the pooling type on the supplied config to
     * AVG (note: this mutates the caller's config object) and delegates to the
     * generic 3d pooling builder.
     *
     * @param input the inputs to pooling
     * @param pooling3DConfig the configuration; its type field is overwritten with AVG
     * @return the resulting SDVariable
     */
    public SDVariable avgPooling3d(SDVariable input, Pooling3DConfig pooling3DConfig) {
        pooling3DConfig.setType(Pooling3D.Pooling3DType.AVG);
        return pooling3d(input, pooling3DConfig);
    }
}
public class TimeIntervalFormatUtil { /** * 切替インターバルの値が閾値内に収まっているかの確認を行う 。 < br > * < br > * 切替インターバル { @ literal 1 < = interval < = 100 } < br > * 切替単位 MillSecond 、 Second 、 Minute 、 Hourのいずれか < br > * @ param interval 切替インターバル * @ param unit 切替単位 * @ return 切替インターバルの値が閾値内に収まっているか */ public static boolean checkValidInterval ( int interval , TimeUnit unit ) { } }
if ( 0 >= interval || INTERVAL_MAX < interval ) { return false ; } switch ( unit ) { case HOURS : break ; case MINUTES : break ; case SECONDS : break ; case MILLISECONDS : break ; default : return false ; } return true ;
public class CmsSiteManagerImpl {
    /**
     * Returns a list of all {@link CmsSite} instances that are compatible to the given organizational unit.<p>
     *
     * Convenience overload: delegates to the five-argument variant with a
     * {@code null} final argument.
     *
     * @param cms the current OpenCms user context
     * @param workplaceMode if true, the root and current site is included for the admin user
     *            and the view permission is required to see the site root
     * @param showShared if the shared folder should be shown
     * @param ouFqn the organizational unit
     * @return a list of all sites available for the current user
     */
    public List<CmsSite> getAvailableSites(CmsObject cms, boolean workplaceMode, boolean showShared, String ouFqn) {
        return getAvailableSites(cms, workplaceMode, showShared, ouFqn, null);
    }
}
public class ListView {
    /**
     * Removes the given item from this view.
     *
     * The jobNames mutation is done under the view's monitor; save() is deliberately
     * called outside the synchronized block to avoid holding the lock during I/O.
     *
     * @param item the item to remove
     * @return true if the item was part of this view and has been removed,
     *         false if it was not listed here
     * @throws IOException if persisting the view fails
     * @since 1.566
     */
    @Override
    public boolean remove(TopLevelItem item) throws IOException {
        synchronized (this) {
            // The view stores items by their name relative to the owning item group.
            String name = item.getRelativeNameFrom(getOwner().getItemGroup());
            if (!jobNames.remove(name))
                return false;
        }
        save();
        return true;
    }
}
public class OracleNoSQLClient {
    /**
     * Finds an entity by primary key. Convenience overload delegating to the
     * three-argument variant with no additional column filter (null).
     *
     * (non-Javadoc)
     * @see com.impetus.kundera.client.Client#find(java.lang.Class, java.lang.Object)
     */
    @Override
    public Object find(Class entityClass, Object key) {
        return find(entityClass, key, null);
    }
}
public class AbstractJaxb {
    /**
     * <p>Insert a string before the element having the specified id attribute.</p>
     * Delegates to the shared {@code InsertByIdUtil} helper, passing this element
     * tree as the search root.
     *
     * @param id        id attribute of the target element
     * @param insString string to insert before the matched element
     * @return true if the insert succeeded; false if no element with the id was found
     * @throws TagTypeUnmatchException if the matched tag's type does not permit the insert
     */
    public boolean insertBeforeId(String id, String insString) throws TagTypeUnmatchException {
        return InsertByIdUtil.insertBeforeId(id, insString, this);
    }
}
public class Tools {
    /**
     * Get the unique values from the array. The order of the returned values is
     * the iteration order of the backing HashSet (i.e. unspecified).
     *
     * @param values Array of values.
     * @return Unique values.
     */
    public static int[] Unique(int[] values) {
        // Collect distinct values into a hash set.
        HashSet<Integer> distinct = new HashSet<Integer>();
        for (int value : values) {
            distinct.add(value);
        }
        // Copy the set back out into a primitive array.
        int[] result = new int[distinct.size()];
        int index = 0;
        for (Integer value : distinct) {
            result[index++] = value;
        }
        return result;
    }
}
public class TheMovieDbApi {
    /**
     * Search the movie, tv show and person collections with a single query.
     * Each item returned in the result array has a media_type field that maps
     * to either movie, tv or person.
     * Each mapped result is the same response you would get from each
     * independent search. Delegates to the dedicated search sub-API.
     *
     * @param query query
     * @param page page
     * @param language language
     * @param includeAdult includeAdult
     * @return the combined search results
     * @throws MovieDbException exception
     */
    public ResultList<MediaBasic> searchMulti(String query, Integer page, String language, Boolean includeAdult) throws MovieDbException {
        return tmdbSearch.searchMulti(query, page, language, includeAdult);
    }
}
public class ClobImpl { /** * Returns a copy of the portion of the < code > CLOB < / code > value represented by this * < code > CLOB < / code > object that starts at position < i > position < / i > and has ip to < i > length < / i > * consecutive characters . * @ param pos the position where to get the substring from * @ param length the length of the substring * @ return the substring * @ exception SQLException if there is an error accessing the < code > CLOB < / code > */ @ Override public String getSubString ( long pos , int length ) throws SQLException { } }
if ( length > stringData . length ( ) ) throw new SQLException ( "Clob contains only " + stringData . length ( ) + " characters (asking for " + length + ")." ) ; return stringData . substring ( ( int ) pos - 1 , length ) ;
public class LeaderCache { /** * Generate a HSID string with BALANCE _ SPI _ SUFFIX information . * When this string is updated , we can tell the reason why HSID is changed . */ public static String suffixHSIdsWithMigratePartitionLeaderRequest ( Long HSId ) { } }
return Long . toString ( Long . MAX_VALUE ) + "/" + Long . toString ( HSId ) + migrate_partition_leader_suffix ;
public class MinerAdapter {
    /**
     * Gets delta modifications of the given elements in string sets. The elements have to be two
     * PhysicalEntity chains. The result array is composed of two sets: gained (index 0) and
     * lost (index 1) — i.e. modifications present only on the second chain, and only on the
     * first chain, respectively, after removing the modifications common to both.
     *
     * @param m match
     * @param memLabel1 the member-end of the first PhysicalEntity chain
     * @param comLabel1 the complex-end of the first PhysicalEntity chain
     * @param memLabel2 the member-end of the second PhysicalEntity chain
     * @param comLabel2 the complex-end of the second PhysicalEntity chain
     * @return delta modifications: {gained, lost}
     */
    protected Set<String>[] getDeltaModifications(Match m, String memLabel1, String comLabel1, String memLabel2, String comLabel2) {
        PhysicalEntityChain chain1 = getChain(m, memLabel1, comLabel1);
        PhysicalEntityChain chain2 = getChain(m, memLabel2, comLabel2);
        // Chain 1 is the "before" state, chain 2 the "after" state.
        Set<ModificationFeature> before = chain1.getModifications();
        Set<ModificationFeature> after = chain2.getModifications();
        Set<String> afterMods = toStringSet(after);
        Set<String> beforeMods = toStringSet(before);
        // Strip modifications present in both states, leaving only the delta.
        removeCommon(afterMods, beforeMods);
        return new Set[] { afterMods, beforeMods };
    }
}
public class ns_conf_revision_history {
    /**
     * <pre>
     * Use this operation to get the ns.conf file revision history.
     * </pre>
     *
     * @param client the nitro service client used to perform the request
     * @return all revision-history resources returned by the appliance
     * @throws Exception if validation or the underlying NITRO call fails
     */
    public static ns_conf_revision_history[] get(nitro_service client) throws Exception {
        ns_conf_revision_history resource = new ns_conf_revision_history();
        // Validate the (empty) request object for the "get" operation before sending.
        resource.validate("get");
        return (ns_conf_revision_history[]) resource.get_resources(client);
    }
}
public class CryptoServiceSingleton { /** * Compares two signed tokens . * @ param tokenA the first token * @ param tokenB the second token * @ return { @ code true } if the tokens are equals , { @ code false } otherwise */ @ Override public boolean compareSignedTokens ( String tokenA , String tokenB ) { } }
String a = extractSignedToken ( tokenA ) ; String b = extractSignedToken ( tokenB ) ; return a != null && b != null && constantTimeEquals ( a , b ) ;
public class Serialized { /** * Deserializes the serialized object . * @ return The deserialized object . * @ throws ClassNotFoundException If a required class could not be found * during deserialization . */ @ SuppressWarnings ( "unchecked" ) public T deserialize ( ) throws ClassNotFoundException { } }
try { ByteArrayInputStream byteStream = new ByteArrayInputStream ( data ) ; GZIPInputStream gzipStream = new GZIPInputStream ( byteStream ) ; try { ObjectInputStream objectStream = new ObjectInputStream ( gzipStream ) ; object = ( T ) objectStream . readObject ( ) ; return object ; } finally { gzipStream . close ( ) ; } } catch ( IOException e ) { throw new UnexpectedException ( e ) ; }
public class PathfindableConfig {
    /**
     * Export the allowed movements: creates one child node per movement, whose
     * text content is the movement's enum name.
     *
     * @param root The root node (must not be <code>null</code>).
     * @param movements The movements to export (must not be <code>null</code>).
     */
    private static void exportAllowedMovements(Xml root, Collection<MovementTile> movements) {
        for (final MovementTile movement : movements) {
            final Xml node = root.createChild(NODE_MOVEMENT);
            node.setText(movement.name());
        }
    }
}
public class Client {
    /**
     * Open the application with the given name, returning an {@link ApplicationSession}
     * through which the application can be accessed. An exception is thrown if the given
     * application is unknown or cannot be accessed. If successful, the session object
     * will be specific to the type of application opened, and it will have its own
     * REST session to the Doradus server.
     *
     * @param appName Name of an application in the connected Doradus database.
     * @return {@link ApplicationSession} through which the application can be accessed.
     * @see com.dell.doradus.client.OLAPSession
     * @see com.dell.doradus.client.SpiderSession
     */
    public ApplicationSession openApplication(String appName) {
        // Validate inputs and client state up front.
        Utils.require(!Utils.isEmpty(appName), "appName");
        Utils.require(!m_restClient.isClosed(), "Client has been closed");
        ApplicationDefinition appDef = getAppDef(appName);
        Utils.require(appDef != null, "Unknown application: %s", appName);
        // Each session gets its own REST client, cloned from this client's connection.
        RESTClient newRestClient = new RESTClient(m_restClient);
        return Client.openApplication(appDef, newRestClient);
    }
}
public class AmazonEC2Client {
    /**
     * Describes the running instances for the specified Spot Fleet.
     *
     * @param describeSpotFleetInstancesRequest
     *        Contains the parameters for DescribeSpotFleetInstances.
     * @return Result of the DescribeSpotFleetInstances operation returned by the service.
     * @sample AmazonEC2.DescribeSpotFleetInstances
     * @see <a href="http://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/DescribeSpotFleetInstances" target="_top">AWS
     *      API Documentation</a>
     */
    @Override
    public DescribeSpotFleetInstancesResult describeSpotFleetInstances(DescribeSpotFleetInstancesRequest request) {
        // Standard generated SDK pattern: pre-execution hooks may replace the request,
        // then the generated executor performs the call.
        request = beforeClientExecution(request);
        return executeDescribeSpotFleetInstances(request);
    }
}
public class OrientDbRepositoryImpl {
    /**
     * Acquire an {@link OObjectDatabaseTx} instance linked to this repository.
     * Also binds the underlying record database to the current thread, since
     * OrientDB resolves the active database per-thread.
     *
     * @return An active {@link OObjectDatabaseTx} instance from this repository pool.
     */
    @Override
    public OObjectDatabaseTx acquireDb() {
        OObjectDatabaseTx db = server.acquire();
        // Register the db with the thread-local so subsequent OrientDB calls on
        // this thread use it implicitly.
        ODatabaseRecordThreadLocal.INSTANCE.set(db.getUnderlying());
        return db;
    }
}
public class CudaExecutioner {
    /**
     * This method returns a set of key/value and key/key/value objects
     * describing the current environment.
     *
     * The result is cached in {@code properties}: on first call the full
     * per-device and general info is built; on subsequent calls only the
     * values that can change over time (free memory, host memory, bandwidth)
     * are refreshed in place.
     *
     * @return the (cached) environment properties
     */
    @Override
    public synchronized Properties getEnvironmentInformation() {
        if (properties == null) {
            Properties props = super.getEnvironmentInformation();
            List<Map<String, Object>> devicesList = new ArrayList<>();
            // fill with per-device information: name, memory, versions
            for (int i = 0; i < nativeOps.getAvailableDevices(); i++) {
                Map<String, Object> deviceProps = new HashMap<>();
                deviceProps.put(Nd4jEnvironment.CUDA_DEVICE_NAME_KEY, nativeOps.getDeviceName(i));
                deviceProps.put(Nd4jEnvironment.CUDA_FREE_MEMORY_KEY, nativeOps.getDeviceFreeMemory(i));
                deviceProps.put(Nd4jEnvironment.CUDA_TOTAL_MEMORY_KEY, nativeOps.getDeviceTotalMemory(i));
                deviceProps.put(Nd4jEnvironment.CUDA_DEVICE_MAJOR_VERSION_KEY, (long) nativeOps.getDeviceMajor(i));
                deviceProps.put(Nd4jEnvironment.CUDA_DEVICE_MINOR_VERSION_KEY, (long) nativeOps.getDeviceMinor(i));
                devicesList.add(i, deviceProps);
            }
            // fill with basic general info
            props.put(Nd4jEnvironment.BACKEND_KEY, "CUDA");
            props.put(Nd4jEnvironment.CUDA_NUM_GPUS_KEY, nativeOps.getAvailableDevices());
            props.put(Nd4jEnvironment.CUDA_DEVICE_INFORMATION_KEY, devicesList);
            props.put(Nd4jEnvironment.BLAS_VENDOR_KEY, (Nd4j.factory().blas()).getBlasVendor().toString());
            props.put(Nd4jEnvironment.HOST_FREE_MEMORY_KEY, Pointer.maxBytes() - Pointer.totalBytes());
            // fill bandwidth information
            props.put(Nd4jEnvironment.MEMORY_BANDWIDTH_KEY, PerformanceTracker.getInstance().getCurrentBandwidth());
            properties = props;
        } else {
            List<Map<String, Object>> devicesList = (List<Map<String, Object>>) properties.get(Nd4jEnvironment.CUDA_DEVICE_INFORMATION_KEY);
            // just update information that might change over time
            for (int i = 0; i < nativeOps.getAvailableDevices(); i++) {
                Map<String, Object> dev = devicesList.get(i);
                dev.put(Nd4jEnvironment.CUDA_FREE_MEMORY_KEY, nativeOps.getDeviceFreeMemory(i));
                dev.put(Nd4jEnvironment.CUDA_TOTAL_MEMORY_KEY, nativeOps.getDeviceTotalMemory(i));
            }
            properties.put(Nd4jEnvironment.CUDA_DEVICE_INFORMATION_KEY, devicesList);
            properties.put(Nd4jEnvironment.HOST_FREE_MEMORY_KEY, Pointer.maxBytes() - Pointer.totalBytes());
            // fill bandwidth information
            properties.put(Nd4jEnvironment.MEMORY_BANDWIDTH_KEY, PerformanceTracker.getInstance().getCurrentBandwidth());
        }
        return properties;
    }
}
public class ExpressionBuilder { /** * Appends a matches test to the condition . * @ param trigger the trigger field . * @ param compare the value to use in the compare . * @ return this ExpressionBuilder . */ public ExpressionBuilder matches ( final SubordinateTrigger trigger , final String compare ) { } }
BooleanExpression exp = new CompareExpression ( CompareType . MATCH , trigger , compare ) ; appendExpression ( exp ) ; return this ;
public class Infer { /** * where */ private Type asSuper ( Type t , Type sup ) { } }
return ( sup . hasTag ( ARRAY ) ) ? new ArrayType ( asSuper ( types . elemtype ( t ) , types . elemtype ( sup ) ) , syms . arrayClass ) : types . asSuper ( t , sup . tsym ) ;
public class IpHelper {
    /**
     * Gets the hostname of the local machine.
     *
     * @return the local hostname, never {@code null}
     * @throws RuntimeException when the local machine's hostname cannot be determined
     */
    public static String getLocalhost() throws RuntimeException {
        try {
            InetAddress addr = InetAddress.getLocalHost();
            return addr.getHostName();
        } catch (UnknownHostException e) {
            // Fixed: the original message said "[FileHelper]" (wrong class tag,
            // likely a copy-paste) and dropped the cause, losing the stack trace.
            throw new RuntimeException("[IpHelper] {getLocalhost}: Can't get local hostname", e);
        }
    }
}
public class LinkedHashMapPro {
    /**
     * Overrides the superclass put path so the newly allocated entry is inserted
     * at the end of the linked list, then evicts the eldest entry if
     * {@code removeEldestEntry} says so.
     */
    @Override
    void addEntry(int hash, K key, V value, int bucketIndex) {
        // The superclass performs the actual insertion; the LinkedHashMap-style
        // linkage appends the new entry at the tail of the doubly-linked list.
        super.addEntry(hash, key, value, bucketIndex);
        // header.after is the eldest (least recently inserted) entry.
        // NOTE(review): this relies on pre-Java-8 HashMap internals
        // (addEntry / removeEntryForKey / header) — confirm the target JDK
        // before any platform upgrade.
        Entry<K, V> eldest = header.after;
        if (removeEldestEntry(eldest)) {
            removeEntryForKey(eldest.key);
        }
    }
}
public class AWSShieldClient { /** * Describes the details of a DDoS attack . * @ param describeAttackRequest * @ return Result of the DescribeAttack operation returned by the service . * @ throws InternalErrorException * Exception that indicates that a problem occurred with the service infrastructure . You can retry the * request . * @ throws AccessDeniedException * Exception that indicates the specified < code > AttackId < / code > does not exist , or the requester does not * have the appropriate permissions to access the < code > AttackId < / code > . * @ sample AWSShield . DescribeAttack * @ see < a href = " http : / / docs . aws . amazon . com / goto / WebAPI / shield - 2016-06-02 / DescribeAttack " target = " _ top " > AWS API * Documentation < / a > */ @ Override public DescribeAttackResult describeAttack ( DescribeAttackRequest request ) { } }
request = beforeClientExecution ( request ) ; return executeDescribeAttack ( request ) ;
public class RepositoryContainer { /** * Get workspace configuration entry by name . * @ param wsName * workspace name * @ return WorkspaceEntry */ public WorkspaceEntry getWorkspaceEntry ( String wsName ) { } }
for ( WorkspaceEntry entry : config . getWorkspaceEntries ( ) ) { if ( entry . getName ( ) . equals ( wsName ) ) return entry ; } return null ;
public class FileIoUtil {
    // Values treated as boolean true, matched case-insensitively against the
    // whole property value. Compiled once instead of on every call.
    private static final java.util.regex.Pattern TRUE_VALUES =
            java.util.regex.Pattern.compile("(?i)(1|yes|true|enabled|on|y)");

    /**
     * Reads a property as a boolean from a Properties object.
     * Returns true iff the property value matches one of: 1, yes, true, enabled, on, y
     * (case-insensitive).
     *
     * @param _props    the properties to read from
     * @param _property the property key
     * @return true when the property exists and denotes an enabled value
     */
    public static boolean readPropertiesBoolean(Properties _props, String _property) {
        // getProperty (rather than containsKey + getProperty) also honours the
        // Properties defaults table, and returns null for non-String values,
        // avoiding the NPE the original containsKey pre-check could not prevent.
        String value = _props.getProperty(_property);
        return value != null && TRUE_VALUES.matcher(value).matches();
    }
}
public class CarouselRenderer {
    /**
     * Generates the opening HTML of the current b:carousel: the outer div with
     * its Bootstrap data-* options, the optional indicator dots, and the opening
     * of the carousel-inner container. encodeChildren()/encodeEnd() complete it.
     *
     * @param context   the FacesContext.
     * @param component the current b:carousel.
     * @throws IOException if writing the HTML fails.
     */
    @Override
    public void encodeBegin(FacesContext context, UIComponent component) throws IOException {
        // Respect rendered="false".
        if (!component.isRendered()) {
            return;
        }
        Carousel carousel = (Carousel) component;
        ResponseWriter rw = context.getResponseWriter();
        String clientId = escapeClientId(component.getClientId());
        beginResponsiveWrapper(component, rw);
        rw.startElement("div", carousel);
        Tooltip.generateTooltip(context, carousel, rw);
        rw.writeAttribute("id", clientId, "id");
        // NOTE(review): generateTooltip is invoked twice (before and after the
        // id attribute). This looks like an accidental duplicate — confirm the
        // rendered output before removing either call.
        Tooltip.generateTooltip(context, carousel, rw);
        AJAXRenderer.generateBootsFacesAJAXAndJavaScript(context, carousel, rw, false);
        // Bootstrap carousel behaviour options.
        writeAttribute(rw, "data-interval", carousel.getInterval());
        writeAttribute(rw, "data-pause", carousel.getPause());
        writeAttribute(rw, "data-wrap", String.valueOf(carousel.isWrap()));
        if (carousel.isStartAnimation()) {
            rw.writeAttribute("data-ride", "carousel", "data-ride");
        }
        rw.writeAttribute("style", carousel.getStyle(), "style");
        // The "carousel" (and optional "slide") classes are always prefixed to
        // whatever the user supplied.
        String styleClass = carousel.getStyleClass();
        if (null == styleClass)
            styleClass = "carousel " + (carousel.isSlide() ? "slide " : "");
        else
            styleClass = "carousel " + (carousel.isSlide() ? "slide " : "") + styleClass;
        rw.writeAttribute("class", styleClass, "class");
        // Mark exactly the child item at activeIndex as active; the index only
        // counts CarouselItem children.
        int activeIndex = carousel.getActiveIndex();
        int currentIndex = 0;
        List<UIComponent> children = carousel.getChildren();
        for (UIComponent c : children) {
            if (c instanceof CarouselItem) {
                ((CarouselItem) c).setActive(currentIndex == activeIndex);
                currentIndex++;
            }
        }
        if (!carousel.isDisabled()) {
            if (carousel.isShowIndicators()) {
                // Indicator dots: <ol class="carousel-indicators"> with one
                // <li data-slide-to=...> per carousel item.
                rw.startElement("ol", component);
                rw.writeAttribute("class", "carousel-indicators", "class");
                currentIndex = 0;
                for (UIComponent c : children) {
                    if (c instanceof CarouselItem) {
                        rw.startElement("li", c);
                        rw.writeAttribute("data-target", "#" + clientId, "data-target");
                        rw.writeAttribute("data-slide-to", String.valueOf(currentIndex), "data-slide-to");
                        if ((currentIndex == activeIndex)) {
                            rw.writeAttribute("class", "active", "class");
                        }
                        rw.endElement("li");
                        currentIndex++;
                    }
                }
                rw.endElement("ol");
            }
        }
        // Open the inner container; it is closed later in the render cycle.
        rw.startElement("div", carousel);
        rw.writeAttribute("class", "carousel-inner", "class");
        rw.writeAttribute("role", "listbox", "role");
    }
}
public class PropertyParametersImpl { /** * < ! - - begin - user - doc - - > * < ! - - end - user - doc - - > * @ generated */ @ Override public void eUnset ( int featureID ) { } }
switch ( featureID ) { case BpsimPackage . PROPERTY_PARAMETERS__PROPERTY : getProperty ( ) . clear ( ) ; return ; } super . eUnset ( featureID ) ;
public class JavaBean { /** * Given a given object , use reflection to determine qualities . * Call the appropriate visitor method to provide the given attributes * @ param bean the object to derive information * @ param visitor object / operation that will be provided the needed information */ public static void acceptVisitor ( Object bean , JavaBeanVisitor visitor ) { } }
if ( bean == null ) return ; Class < ? > beanClass = bean . getClass ( ) ; if ( ClassPath . isPrimitive ( beanClass ) ) { visitor . visitClass ( beanClass , bean ) ; return ; // no properties } PropertyDescriptor descriptors [ ] = getPropertyDescriptors ( bean ) ; Object value = null ; for ( int i = 0 ; i < descriptors . length ; i ++ ) { String name = descriptors [ i ] . getName ( ) ; if ( "class" . equals ( name ) ) { visitor . visitClass ( ( Class < ? > ) getProperty ( bean , name ) , bean ) ; continue ; } if ( descriptors [ i ] . getReadMethod ( ) != null ) { value = getProperty ( bean , name ) ; visitor . visitProperty ( name , value , bean ) ; } }
public class Array { /** * Swaps arr [ a . . ( a + n - 1 ) ] with arr [ b . . ( b + n - 1 ) ] . * @ param arr order array * @ param a first offset * @ param b second offset * @ param n number of values */ public static void swap ( final int [ ] arr , final int a , final int b , final int n ) { } }
for ( int i = 0 ; i < n ; ++ i ) swap ( arr , a + i , b + i ) ;
public class MatFileWriter { /** * Writes < code > MLArrays < / code > into < code > WritableByteChannel < / code > . * @ param channel * the channel to write to * @ param data * the collection of < code > { @ link MLArray } < / code > objects * @ throws IOException * if writing fails */ private synchronized void write ( WritableByteChannel channel , Collection < MLArray > data ) throws IOException { } }
try { // write header writeHeader ( channel ) ; // write data for ( MLArray matrix : data ) { // compress data to save storage Deflater compresser = new Deflater ( ) ; ByteArrayOutputStream2 compressed = new ByteArrayOutputStream2 ( ) ; DataOutputStream dout = new DataOutputStream ( new DeflaterOutputStream ( compressed , compresser ) ) ; writeMatrix ( dout , matrix ) ; dout . flush ( ) ; dout . close ( ) ; // write COMPRESSED tag and compressed data into output channel int compressedSize = compressed . getCount ( ) ; ByteBuffer buf = ByteBuffer . allocateDirect ( 2 * 4 /* Int size */ + compressedSize ) ; buf . putInt ( MatDataTypes . miCOMPRESSED ) ; buf . putInt ( compressedSize ) ; buf . put ( compressed . getBuf ( ) , 0 , compressedSize ) ; buf . flip ( ) ; channel . write ( buf ) ; } } catch ( IOException e ) { throw e ; } finally { channel . close ( ) ; }
public class FlexBase64 {
    /**
     * Decodes one Base64 byte buffer into another. This method will return and
     * save state if the target does not have the required capacity; subsequent
     * calls with a new target resume where it left off (the source buffer's
     * position). Similarly, not all of the source data need be available — this
     * method can be called repeatedly as data arrives.
     *
     * <p>The decoder skips white space, but errors if it detects corruption.</p>
     *
     * @param source the byte buffer to read encoded data from
     * @param target the byte buffer to write decoded data to
     * @throws IOException if the encoded data is corrupted
     */
    public void decode(ByteBuffer source, ByteBuffer target) throws IOException {
        if (target == null)
            throw new IllegalStateException();
        // Resume the state machine saved by a previous partial decode.
        int last = this.last;
        int state = this.state;
        int remaining = source.remaining();
        int targetRemaining = target.remaining();
        int b = 0;
        while (remaining-- > 0 && targetRemaining > 0) {
            b = nextByte(source, state, last, false);
            if (b == MARK) {
                // Padding marker: remember it, then read the following byte.
                last = MARK;
                if (--remaining <= 0) {
                    break;
                }
                b = nextByte(source, state, last, false);
            }
            if (b == DONE) {
                // End of stream: reset the state machine.
                last = state = 0;
                break;
            }
            if (b == SKIP) {
                // Whitespace: ignore.
                continue;
            }
            // Bit split per decoded byte: ( 6 | 2 ) ( 4 | 4 ) ( 2 | 6 )
            if (state == 0) {
                last = b << 2;
                state++;
                if (remaining-- <= 0) {
                    break;
                }
                b = nextByte(source, state, last, false);
                if ((b & 0xF000) != 0) {
                    // Control value (MARK/DONE/SKIP): push the byte back and
                    // re-dispatch it at the top of the loop.
                    source.position(source.position() - 1);
                    continue;
                }
            }
            if (state == 1) {
                target.put((byte) (last | (b >>> 4)));
                last = (b & 0x0F) << 4;
                state++;
                if (remaining-- <= 0 || --targetRemaining <= 0) {
                    break;
                }
                b = nextByte(source, state, last, false);
                if ((b & 0xF000) != 0) {
                    source.position(source.position() - 1);
                    continue;
                }
            }
            if (state == 2) {
                target.put((byte) (last | (b >>> 2)));
                last = (b & 0x3) << 6;
                state++;
                if (remaining-- <= 0 || --targetRemaining <= 0) {
                    break;
                }
                b = nextByte(source, state, last, false);
                if ((b & 0xF000) != 0) {
                    source.position(source.position() - 1);
                    continue;
                }
            }
            if (state == 3) {
                target.put((byte) (last | b));
                last = state = 0;
                targetRemaining--;
            }
        }
        if (remaining > 0) {
            // Target filled mid-quantum: stash the unread source bytes.
            drain(source, b, state, last);
        }
        // Persist the state machine for the next call.
        this.last = last;
        this.state = state;
        this.lastPos = source.position();
    }
}
public class CRD {
    /**
     * Creates a new CRD out of a byte array; if possible, a matching, more
     * specific CRD subtype is returned. CRDs for specific communication types
     * might expect certain characteristics of {@code data}.
     *
     * @param data   byte array containing the CRD structure
     * @param offset start offset of CRD in {@code data}
     * @return the new CRD object
     * @throws KNXFormatException if no CRD found or invalid structure
     */
    public static CRD createResponse(byte[] data, int offset) throws KNXFormatException {
        // Shared factory for CRI/CRD; `false` selects the CRD (response) variant.
        return (CRD) create(false, data, offset);
    }
}
public class MyReflectionUtils {
    /**
     * Builds an instance of the class from a map of values, using the default
     * difference handler.
     *
     * @param clazz  the class to instantiate
     * @param values the property-name-to-value map
     * @return the populated instance
     * @throws InstantiationException    error instantiating
     * @throws IllegalAccessException    access error
     * @throws IntrospectionException    introspection error
     * @throws IllegalArgumentException  invalid argument
     * @throws InvocationTargetException invalid target
     */
    public static <T> T buildInstanceForMap(Class<T> clazz, Map<String, Object> values)
            throws InstantiationException, IllegalAccessException, IntrospectionException,
            IllegalArgumentException, InvocationTargetException {
        // Convenience overload: supply the default difference handler.
        return buildInstanceForMap(clazz, values, new MyDefaultReflectionDifferenceHandler());
    }
}
public class MomentSketchWrapper { /** * Estimates quantiles given the statistics in a moments sketch . * @ param fractions real values between [ 0,1 ] for which we want to estimate quantiles * @ return estimated quantiles . */ public double [ ] getQuantiles ( double [ ] fractions ) { } }
// The solver attempts to construct a distribution estimate which matches the // statistics tracked by the moments sketch . We can then read off quantile estimates // from the reconstructed distribution . // This operation can be relatively expensive ( ~ 1 ms ) so we set the parameters from distribution // reconstruction to conservative values . MomentSolver ms = new MomentSolver ( data ) ; // Constants here are chosen to yield maximum precision while keeping solve times ~ 1ms on 2Ghz cpu // Grid size can be increased if longer solve times are acceptable ms . setGridSize ( 1024 ) ; ms . setMaxIter ( 15 ) ; ms . solve ( ) ; double [ ] rawQuantiles = ms . getQuantiles ( fractions ) ; for ( int i = 0 ; i < fractions . length ; i ++ ) { if ( useArcSinh ) { rawQuantiles [ i ] = Math . sinh ( rawQuantiles [ i ] ) ; } } return rawQuantiles ;
public class BackupManager { /** * Writes a backup to the specified stream . * @ param os the stream to write to */ public void backup ( OutputStream os ) throws IOException { } }
int count = 0 ; GzipCompressorOutputStream zipStream = new GzipCompressorOutputStream ( os ) ; for ( Master master : mRegistry . getServers ( ) ) { Iterator < JournalEntry > it = master . getJournalEntryIterator ( ) ; while ( it . hasNext ( ) ) { it . next ( ) . toBuilder ( ) . clearSequenceNumber ( ) . build ( ) . writeDelimitedTo ( zipStream ) ; count ++ ; } } // finish ( ) instead of close ( ) since close would close os , which is owned by the caller . zipStream . finish ( ) ; LOG . info ( "Created backup with {} entries" , count ) ;
public class InvertibleCryptographer {
    /**
     * Encrypts the text invertibly and returns it hex-encoded.
     *
     * @param plainText the plain text to be encrypted. (NotNull, EmptyAllowed)
     * @return the encrypted text from the plain text. (NotNull, EmptyAllowed: depends on algorithm)
     * @throws CipherFailureException when the cipher fails.
     */
    public synchronized String encrypt(String plainText) {
        assertArgumentNotNull("plainText", plainText);
        // Lazily set up the ciphers on first use; synchronized guards both the
        // lazy init and the (stateful) cipher usage.
        if (encryptingCipher == null) {
            initialize();
        }
        // Encrypt, then hex-encode the result into a printable string.
        return new String(encodeHex(doEncrypt(plainText)));
    }
}
public class ParamTaglet {
    /**
     * Given an array of {@code ParamTag}s, returns their output representation,
     * inheriting any param tags that are missing.
     *
     * @param isNonTypeParams  whether these are ordinary (non-type) parameters
     * @param holder           the doc that holds the param tags
     * @param writer           the TagletWriter that will write this tag
     * @param formalParameters the parameters (from type or executable member) to check
     * @param paramTags        the explicit param tags present on the holder
     * @return the output representation of these {@code ParamTag}s
     */
    private Content getTagletOutput(boolean isNonTypeParams, Doc holder, TagletWriter writer,
            Object[] formalParameters, ParamTag[] paramTags) {
        Content result = writer.getOutputInstance();
        // Tracks which parameter names already produced output, so inherited
        // docs are only added for the ones still missing.
        Set<String> alreadyDocumented = new HashSet<String>();
        if (paramTags.length > 0) {
            result.addContent(processParamTags(isNonTypeParams, paramTags,
                    getRankMap(formalParameters), writer, alreadyDocumented));
        }
        if (alreadyDocumented.size() != formalParameters.length) {
            // Some parameters are missing corresponding @param tags;
            // try to inherit them from supertypes.
            result.addContent(getInheritedTagletOutput(isNonTypeParams, holder,
                    writer, formalParameters, alreadyDocumented));
        }
        return result;
    }
}
public class DatabasesInner {
    /**
     * Resumes a database.
     *
     * @param resourceGroupName the name of the resource group that contains the resource
     * @param serverName        the name of the server
     * @param databaseName      the name of the database to be resumed
     * @throws IllegalArgumentException thrown if parameters fail the validation
     * @return the observable for the request
     */
    public Observable<DatabaseInner> resumeAsync(String resourceGroupName, String serverName, String databaseName) {
        // Delegate to the ServiceResponse variant and unwrap the payload for
        // callers that do not need the response envelope.
        return resumeWithServiceResponseAsync(resourceGroupName, serverName, databaseName)
                .map(new Func1<ServiceResponse<DatabaseInner>, DatabaseInner>() {
                    @Override
                    public DatabaseInner call(ServiceResponse<DatabaseInner> response) {
                        return response.body();
                    }
                });
    }
}
public class Operation {
    /**
     * Creates an operation that fills a new matrix with uniformly random
     * numbers in [0,1). A and B must both be integers (rows, columns).
     */
    public static Info rand(final Variable A, final Variable B, ManagerTempVariables manager) {
        Info ret = new Info();
        final VariableMatrix output = manager.createMatrix();
        ret.output = output;
        if (A instanceof VariableInteger && B instanceof VariableInteger) {
            // Deferred execution: the anonymous Operation captures A, B and the
            // output matrix; sizes are read when process() actually runs.
            ret.op = new Operation("rand-ii") {
                @Override
                public void process() {
                    int numRows = ((VariableInteger) A).value;
                    int numCols = ((VariableInteger) B).value;
                    output.matrix.reshape(numRows, numCols);
                    // Uniform fill in [0,1) using the manager's shared RNG.
                    RandomMatrices_DDRM.fillUniform(output.matrix, 0, 1, manager.getRandom());
                }
            };
        } else {
            throw new RuntimeException("Expected two integers got " + A + " " + B);
        }
        return ret;
    }
}
public class srecParser {
    /**
     * ANTLR-generated rule method — do not hand-edit; regenerate from srec.g.
     *
     * Grammar: script : ( require )* ( script_stmt_block )?
     *                   -> ^( SCRIPT ( require )* ( script_stmt_block )? ) ;
     */
    public final srecParser.script_return script() throws RecognitionException {
        srecParser.script_return retval = new srecParser.script_return();
        retval.start = input.LT(1);
        CommonTree root_0 = null;
        srecParser.require_return require1 = null;
        srecParser.script_stmt_block_return script_stmt_block2 = null;
        RewriteRuleSubtreeStream stream_require = new RewriteRuleSubtreeStream(adaptor, "rule require");
        RewriteRuleSubtreeStream stream_script_stmt_block = new RewriteRuleSubtreeStream(adaptor, "rule script_stmt_block");
        try {
            // ( require )* ( script_stmt_block )? -> ^( SCRIPT ... )
            {
                // ( require )* — token 28 introduces a require clause
                loop1: do {
                    int alt1 = 2;
                    int LA1_0 = input.LA(1);
                    if ((LA1_0 == 28)) {
                        alt1 = 1;
                    }
                    switch (alt1) {
                        case 1:
                            // require
                            {
                                pushFollow(FOLLOW_require_in_script117);
                                require1 = require();
                                state._fsp--;
                                if (state.failed)
                                    return retval;
                                if (state.backtracking == 0)
                                    stream_require.add(require1.getTree());
                            }
                            break;
                        default:
                            break loop1;
                    }
                } while (true);
                // ( script_stmt_block )? — ID or token 33 starts a statement block
                int alt2 = 2;
                int LA2_0 = input.LA(1);
                if ((LA2_0 == ID || LA2_0 == 33)) {
                    alt2 = 1;
                }
                switch (alt2) {
                    case 1:
                        // script_stmt_block
                        {
                            pushFollow(FOLLOW_script_stmt_block_in_script120);
                            script_stmt_block2 = script_stmt_block();
                            state._fsp--;
                            if (state.failed)
                                return retval;
                            if (state.backtracking == 0)
                                stream_script_stmt_block.add(script_stmt_block2.getTree());
                        }
                        break;
                }
                // AST REWRITE
                // elements: script_stmt_block, require
                // rule labels: retval
                if (state.backtracking == 0) {
                    retval.tree = root_0;
                    RewriteRuleSubtreeStream stream_retval =
                            new RewriteRuleSubtreeStream(adaptor, "rule retval", retval != null ? retval.tree : null);
                    root_0 = (CommonTree) adaptor.nil();
                    // -> ^( SCRIPT ( require )* ( script_stmt_block )? )
                    {
                        // ^( SCRIPT ( require )* ( script_stmt_block )? )
                        {
                            CommonTree root_1 = (CommonTree) adaptor.nil();
                            root_1 = (CommonTree) adaptor.becomeRoot(
                                    (CommonTree) adaptor.create(SCRIPT, "SCRIPT"), root_1);
                            // ( require )*
                            while (stream_require.hasNext()) {
                                adaptor.addChild(root_1, stream_require.nextTree());
                            }
                            stream_require.reset();
                            // ( script_stmt_block )?
                            if (stream_script_stmt_block.hasNext()) {
                                adaptor.addChild(root_1, stream_script_stmt_block.nextTree());
                            }
                            stream_script_stmt_block.reset();
                            adaptor.addChild(root_0, root_1);
                        }
                    }
                    retval.tree = root_0;
                }
            }
            retval.stop = input.LT(-1);
            if (state.backtracking == 0) {
                retval.tree = (CommonTree) adaptor.rulePostProcessing(root_0);
                adaptor.setTokenBoundaries(retval.tree, retval.start, retval.stop);
            }
        } catch (RecognitionException re) {
            // Standard ANTLR error recovery: report, resync, and return an error node.
            reportError(re);
            recover(input, re);
            retval.tree = (CommonTree) adaptor.errorNode(input, retval.start, input.LT(-1), re);
        } finally {
        }
        return retval;
    }
}
public class GetConfigurationSetRequestMarshaller { /** * Marshall the given parameter object . */ public void marshall ( GetConfigurationSetRequest getConfigurationSetRequest , ProtocolMarshaller protocolMarshaller ) { } }
if ( getConfigurationSetRequest == null ) { throw new SdkClientException ( "Invalid argument passed to marshall(...)" ) ; } try { protocolMarshaller . marshall ( getConfigurationSetRequest . getConfigurationSetName ( ) , CONFIGURATIONSETNAME_BINDING ) ; } catch ( Exception e ) { throw new SdkClientException ( "Unable to marshall request to JSON: " + e . getMessage ( ) , e ) ; }
public class ResultColumn {
    /**
     * Converts this result (in the form of a blob) to the specified object type.
     *
     * @param <T> the type to convert to.
     * @return the instance that was stored as a blob, or {@code null} for SQL NULL.
     * @throws DatabaseEngineRuntimeException if the value is not a blob or if
     *         something goes wrong when reading the object.
     */
    public <T> T toBlob() throws DatabaseEngineRuntimeException {
        // SQL NULL maps straight to a Java null.
        if (isNull()) {
            return null;
        }
        InputStream is;
        if (val instanceof Blob) {
            try {
                is = ((Blob) val).getBinaryStream();
            } catch (final SQLException e) {
                throw new DatabaseEngineRuntimeException("Error getting blob input stream", e);
            }
        } else if (val instanceof byte[]) {
            is = new ByteArrayInputStream((byte[]) val);
            // NOTE(review): the method body appears truncated in this excerpt —
            // the final else branch (non-blob error) and the deserialization /
            // return of `is` are not visible here. Do not rely on this fragment
            // as the complete implementation.
}
public class VirtualMachineScaleSetsInner {
    /**
     * Restarts one or more virtual machines in a VM scale set.
     *
     * @param resourceGroupName the name of the resource group
     * @param vmScaleSetName    the name of the VM scale set
     * @throws IllegalArgumentException thrown if parameters fail the validation
     * @return the observable for the request
     */
    public Observable<OperationStatusResponseInner> restartAsync(String resourceGroupName, String vmScaleSetName) {
        // Delegate to the ServiceResponse variant and unwrap the payload for
        // callers that do not need the response envelope.
        return restartWithServiceResponseAsync(resourceGroupName, vmScaleSetName)
                .map(new Func1<ServiceResponse<OperationStatusResponseInner>, OperationStatusResponseInner>() {
                    @Override
                    public OperationStatusResponseInner call(ServiceResponse<OperationStatusResponseInner> response) {
                        return response.body();
                    }
                });
    }
}
public class DelegatingEntityLinks {
    /**
     * Resolves the plugin responsible for the given type and delegates link
     * building to it.
     *
     * @see org.springframework.hateoas.EntityLinks#linkFor(java.lang.Class, java.lang.Object[])
     */
    @Override
    public LinkBuilder linkFor(Class<?> type, Object... parameters) {
        // getPluginFor selects the EntityLinks implementation registered for this type.
        return getPluginFor(type).linkFor(type, parameters);
    }
}
public class BasicDigitalObject {
    /**
     * Checks whether this object has the given relationship, assuming
     * {@code m_pid} as the subject (i.e. RELS-EXT only).
     *
     * @param predicate the relationship predicate
     * @param object    the relationship object
     * @return true if the relationship exists with this object's PID as subject
     */
    public boolean hasRelationship(PredicateNode predicate, ObjectNode object) {
        // Convert our PID to its URI-reference form and delegate to the
        // three-argument (subject, predicate, object) overload.
        return hasRelationship(PID.toURIReference(m_pid), predicate, object);
    }
}
public class AbstractRegionPainter {
    /**
     * Returns a new FourColors object with each of the four gradient stops
     * disabled via {@code disable(Color)}.
     *
     * @param colors the original colors.
     * @return the new, disabled colors.
     */
    protected FourColors disable(FourColors colors) {
        // Apply the single-color disable() to each stop, preserving order:
        // top, upperMid, lowerMid, bottom.
        return new FourColors(disable(colors.top), disable(colors.upperMid),
                disable(colors.lowerMid), disable(colors.bottom));
    }
}
public class WordCluster {
    /**
     * Reads the corpus from the reader and accumulates per-character frequency
     * statistics plus character-bigram and left/right neighbour tables.
     * (Original doc: 读文件，并统计每个字的字频 — "read file and count each
     * character's frequency".)
     */
    public void read(Reader reader) {
        totalword = 0;
        while (reader.hasNext()) {
            String content = (String) reader.next().getData();
            // -1 acts as the virtual sentence-start marker.
            int prechar = -1;
            wordProb.adjustOrPutValue(prechar, 1, 1);
            // +2 accounts for the virtual start and end markers.
            totalword += content.length() + 2;
            for (int i = 0; i < content.length() + 1; i++) {
                int idx;
                if (i < content.length()) {
                    String c = String.valueOf(content.charAt(i));
                    // NOTE(review): field name "alpahbet" is a typo kept as-is
                    // (renaming would touch other members of this class).
                    idx = alpahbet.lookupIndex(c);
                } else {
                    // -2 acts as the virtual sentence-end marker.
                    idx = -2;
                }
                // Unigram count for the current character (or marker).
                wordProb.adjustOrPutValue(idx, 1, 1);
                // Bigram count prechar -> idx.
                TIntFloatHashMap map = pcc.get(prechar);
                if (map == null) {
                    map = new TIntFloatHashMap();
                    pcc.put(prechar, map);
                }
                map.adjustOrPutValue(idx, 1, 1);
                // Record prechar as a left neighbour of idx ...
                TIntHashSet left = leftnodes.get(idx);
                if (left == null) {
                    left = new TIntHashSet();
                    leftnodes.put(idx, left);
                }
                left.add(prechar);
                // ... and idx as a right neighbour of prechar.
                TIntHashSet right = rightnodes.get(prechar);
                if (right == null) {
                    right = new TIntHashSet();
                    rightnodes.put(prechar, right);
                }
                right.add(idx);
                prechar = idx;
            }
        }
        lastid = alpahbet.size();
        // Runtime output strings intentionally kept verbatim (Chinese labels:
        // "total count" and "dictionary size").
        System.out.println("[总个数:]\t" + totalword);
        int size = alpahbet.size();
        System.out.println("[字典大小:]\t" + size);
        statisticProb();
    }
}
public class OpenIabHelper {
    /**
     * Maps the SKU and the store-specific SKU for a particular store.
     * OpenIAB maps SKUs in both directions, so only the inner SKU is needed to
     * purchase, consume and check.
     *
     * @param sku      the logical internal SKU, e.g. redhat
     * @param storeName the name of a store, e.g. {@code NAME_AMAZON}, {@code NAME_GOOGLE}
     * @param storeSku the store-specific SKU; must not duplicate already mapped values
     * @throws java.lang.IllegalArgumentException if one of the arguments is null or empty
     * @deprecated Use {@link org.onepf.oms.SkuManager#mapSku(String, String, String)}
     */
    public static void mapSku(String sku, String storeName, @NotNull String storeSku) {
        // Kept for backward compatibility: forward to the SkuManager singleton.
        SkuManager.getInstance().mapSku(sku, storeName, storeSku);
    }
}
public class SystemInputJson {
    /**
     * Returns the condition definition represented by the given JSON object,
     * re-wrapping any parse failure with an "Invalid condition" context message.
     */
    private static ICondition asValidCondition(JsonObject json) {
        try {
            return asCondition(json);
        } catch (SystemInputException e) {
            // Preserve the original failure as the cause while adding context.
            throw new SystemInputException("Invalid condition", e);
        }
    }
}
public class IntLinkedHashMap { /** * Removes the mapping with the specified key from this map . * @ param key the key of the mapping to remove . * @ return the value of the removed mapping or { @ code null } if no mapping * for the specified key was found . */ public V remove ( final int key ) { } }
IntLinkedEntry < V > entry = removeEntry ( key ) ; if ( entry == null ) { return null ; } V ret = entry . value ; reuseAfterDelete ( entry ) ; return ret ;
public class SocketBindingJBossASClient { /** * Checks to see if there is already a socket binding in the given group . * @ param socketBindingGroupName the name of the socket binding group in which to look for the named socket binding * @ param socketBindingName the name of the socket binding to look for * @ return true if there is an existing socket binding in the given group * @ throws Exception any error */ public boolean isSocketBinding ( String socketBindingGroupName , String socketBindingName ) throws Exception { } }
Address addr = Address . root ( ) . add ( SOCKET_BINDING_GROUP , socketBindingGroupName ) ; String haystack = SOCKET_BINDING ; return null != findNodeInList ( addr , haystack , socketBindingName ) ;
public class AmazonRoute53ResolverClient { /** * Gets information about a resolver rule policy . A resolver rule policy specifies the Resolver operations and * resources that you want to allow another AWS account to be able to use . * @ param getResolverRulePolicyRequest * @ return Result of the GetResolverRulePolicy operation returned by the service . * @ throws InvalidParameterException * One or more parameters in this request are not valid . * @ throws UnknownResourceException * The specified resource doesn ' t exist . * @ throws InternalServiceErrorException * We encountered an unknown error . Try again in a few minutes . * @ sample AmazonRoute53Resolver . GetResolverRulePolicy * @ see < a href = " http : / / docs . aws . amazon . com / goto / WebAPI / route53resolver - 2018-04-01 / GetResolverRulePolicy " * target = " _ top " > AWS API Documentation < / a > */ @ Override public GetResolverRulePolicyResult getResolverRulePolicy ( GetResolverRulePolicyRequest request ) { } }
request = beforeClientExecution ( request ) ; return executeGetResolverRulePolicy ( request ) ;
public class TraceSummary { /** * Service IDs from the trace ' s segment documents . * < b > NOTE : < / b > This method appends the values to the existing list ( if any ) . Use * { @ link # setServiceIds ( java . util . Collection ) } or { @ link # withServiceIds ( java . util . Collection ) } if you want to * override the existing values . * @ param serviceIds * Service IDs from the trace ' s segment documents . * @ return Returns a reference to this object so that method calls can be chained together . */ public TraceSummary withServiceIds ( ServiceId ... serviceIds ) { } }
if ( this . serviceIds == null ) { setServiceIds ( new java . util . ArrayList < ServiceId > ( serviceIds . length ) ) ; } for ( ServiceId ele : serviceIds ) { this . serviceIds . add ( ele ) ; } return this ;
public class SystemConfiguration { /** * Register a service with default properties */ private void registerService ( BundleContext bc , String name , Object serviceInstance ) { } }
Dictionary < String , Object > properties = new Hashtable < String , Object > ( ) ; properties . put ( "service.vendor" , "IBM" ) ; bc . registerService ( name , serviceInstance , properties ) ;
public class Table {
    /**
     * Flush all rows from a given position, but do not freeze the table.
     *
     * @param util a XMLUtil instance for writing XML
     * @param appendable where to write
     * @param rowIndex the index of the row to start flushing from
     * @throws IOException if an I/O error occurs during the flush
     */
    public void flushSomeAvailableRowsFrom(final XMLUtil util, final Appendable appendable, final int rowIndex) throws IOException {
        // Delegate to the appender, which owns the row buffer and the serialization logic.
        this.appender.flushSomeAvailableRowsFrom(util, appendable, rowIndex);
    }
}
public class CommerceRegionUtil {
    /**
     * Returns the commerce regions before and after the current commerce region in the ordered
     * set where commerceCountryId = &#63;.
     *
     * @param commerceRegionId the primary key of the current commerce region
     * @param commerceCountryId the commerce country ID
     * @param orderByComparator the comparator to order the set by (optionally <code>null</code>)
     * @return the previous, current, and next commerce region
     * @throws NoSuchRegionException if a commerce region with the primary key could not be found
     */
    public static CommerceRegion[] findByCommerceCountryId_PrevAndNext(long commerceRegionId, long commerceCountryId, OrderByComparator<CommerceRegion> orderByComparator) throws com.liferay.commerce.exception.NoSuchRegionException {
        // Static facade: forward straight to the persistence implementation.
        return getPersistence().findByCommerceCountryId_PrevAndNext(commerceRegionId, commerceCountryId, orderByComparator);
    }
}
public class AnnisRunner { /** * FIXME : missing tests */ public void doSql ( String funcCall ) { } }
String doSqlFunctionName = "sql_" + funcCall . split ( "\\s" , 2 ) [ 0 ] ; SqlGenerator < QueryData > gen = getGeneratorForQueryFunction ( funcCall ) ; String annisQuery = getAnnisQueryFromFunctionCall ( funcCall ) ; QueryData queryData = analyzeQuery ( annisQuery , doSqlFunctionName ) ; out . println ( "NOTICE: left = " + left + "; right = " + right + "; limit = " + limit + "; offset = " + offset ) ; out . println ( gen . toSql ( queryData ) ) ;
public class BuilderModel { /** * Returns the selected components in an array . */ public int [ ] getSelectedComponents ( ) { } }
int [ ] values = new int [ _selected . size ( ) ] ; Iterator < Integer > iter = _selected . values ( ) . iterator ( ) ; for ( int ii = 0 ; iter . hasNext ( ) ; ii ++ ) { values [ ii ] = iter . next ( ) . intValue ( ) ; } return values ;
public class ElasticPoolOperationsInner { /** * Gets a list of operations performed on the elastic pool . * @ param nextPageLink The NextLink from the previous successful call to List operation . * @ throws IllegalArgumentException thrown if parameters fail the validation * @ throws CloudException thrown if the request is rejected by server * @ throws RuntimeException all other wrapped checked exceptions if the request fails to be sent * @ return the PagedList & lt ; ElasticPoolOperationInner & gt ; object if successful . */ public PagedList < ElasticPoolOperationInner > listByElasticPoolNext ( final String nextPageLink ) { } }
ServiceResponse < Page < ElasticPoolOperationInner > > response = listByElasticPoolNextSinglePageAsync ( nextPageLink ) . toBlocking ( ) . single ( ) ; return new PagedList < ElasticPoolOperationInner > ( response . body ( ) ) { @ Override public Page < ElasticPoolOperationInner > nextPage ( String nextPageLink ) { return listByElasticPoolNextSinglePageAsync ( nextPageLink ) . toBlocking ( ) . single ( ) . body ( ) ; } } ;
public class JavacState {
    /**
     * For all packages, find all sources belonging to the package, group the sources
     * based on their transformers and apply the transformers on each source code group.
     *
     * @param outputDir destination directory for generated artifacts
     * @param suffixRules maps a source-file suffix to the Transformer that handles it
     * @return true if every transform succeeded, false if any transform reported failure
     */
    private boolean perform(File outputDir, Map<String, Transformer> suffixRules) {
        boolean rc = true;
        // Group sources based on transforms. A source file can only belong to a single transform.
        Map<Transformer, Map<String, Set<URI>>> groupedSources = new HashMap<Transformer, Map<String, Set<URI>>>();
        for (Source src : now.sources().values()) {
            Transformer t = suffixRules.get(src.suffix());
            if (t != null) {
                // Only sources in tainted packages need recompilation; linked-only sources are excluded.
                if (taintedPackages.contains(src.pkg().name()) && !src.isLinkedOnly()) {
                    addFileToTransform(groupedSources, t, src);
                }
            }
        }
        // Go through the transforms and transform them.
        for (Map.Entry<Transformer, Map<String, Set<URI>>> e : groupedSources.entrySet()) {
            Transformer t = e.getKey();
            Map<String, Set<URI>> srcs = e.getValue();
            // These maps need to be synchronized since multiple threads will be writing results into them.
            Map<String, Set<URI>> packageArtifacts = Collections.synchronizedMap(new HashMap<String, Set<URI>>());
            Map<String, Set<String>> packageDependencies = Collections.synchronizedMap(new HashMap<String, Set<String>>());
            Map<String, String> packagePublicApis = Collections.synchronizedMap(new HashMap<String, String>());
            boolean r = t.transform(srcs, visibleSrcs, visibleClasses, prev.dependents(), outputDir.toURI(), packageArtifacts, packageDependencies, packagePublicApis, 0, isIncremental(), numCores, out, err);
            // A single failed transform makes the whole pass fail, but remaining transforms still run.
            if (!r) rc = false;
            for (String p : srcs.keySet()) {
                recompiledPackages.add(p);
            }
            // The transform is done! Extract all the artifacts and store the info into the Package objects.
            for (Map.Entry<String, Set<URI>> a : packageArtifacts.entrySet()) {
                Module mnow = now.findModuleFromPackageName(a.getKey());
                mnow.addArtifacts(a.getKey(), a.getValue());
            }
            // Extract all the dependencies and store the info into the Package objects.
            for (Map.Entry<String, Set<String>> a : packageDependencies.entrySet()) {
                Set<String> deps = a.getValue();
                Module mnow = now.findModuleFromPackageName(a.getKey());
                mnow.setDependencies(a.getKey(), deps);
            }
            // Extract all the pubapis and store the info into the Package objects.
            // Compare the new pubapi against the previous build state to detect API changes.
            for (Map.Entry<String, String> a : packagePublicApis.entrySet()) {
                Module mprev = prev.findModuleFromPackageName(a.getKey());
                List<String> pubapi = Package.pubapiToList(a.getValue());
                Module mnow = now.findModuleFromPackageName(a.getKey());
                mnow.setPubapi(a.getKey(), pubapi);
                if (mprev.hasPubapiChanged(a.getKey(), pubapi)) {
                    // Aha! The pubapi of this package has changed!
                    // It can also be a new compile from scratch.
                    if (mprev.lookupPackage(a.getKey()).existsInJavacState()) {
                        // This is an incremental compile! The pubapi
                        // did change. Trigger recompilation of dependents.
                        packagesWithChangedPublicApis.add(a.getKey());
                        Log.info("The pubapi of " + Util.justPackageName(a.getKey()) + " has changed!");
                    }
                }
            }
        }
        return rc;
    }
}
public class OServerAdmin {
    /**
     * Copies a database to a remote server instance.
     *
     * @param iDatabaseName name of the database to copy
     * @param iDatabaseUserName user to authenticate against the database
     * @param iDatabaseUserPassword password for the database user
     * @param iRemoteName target remote server
     * @param iRemoteEngine storage engine to use on the remote side
     * @return The instance itself. Useful to execute method in chain
     * @throws IOException if the underlying channel fails
     */
    public synchronized OServerAdmin copyDatabase(final String iDatabaseName, final String iDatabaseUserName, final String iDatabaseUserPassword, final String iRemoteName, final String iRemoteEngine) throws IOException {
        storage.checkConnection();
        try {
            final OChannelBinaryClient network = storage.beginRequest(OChannelBinaryProtocol.REQUEST_DB_COPY);
            try {
                // Wire protocol: the server expects these five strings in exactly this order.
                network.writeString(iDatabaseName);
                network.writeString(iDatabaseUserName);
                network.writeString(iDatabaseUserPassword);
                network.writeString(iRemoteName);
                network.writeString(iRemoteEngine);
            } finally {
                // Always release the request channel, even if a write fails.
                storage.endRequest(network);
            }
            storage.getResponse(network);
            OLogManager.instance().debug(this, "Database '%s' has been copied to the server '%s'", iDatabaseName, iRemoteName);
        } catch (Exception e) {
            // Rethrown by the log manager wrapped as OStorageException.
            OLogManager.instance().exception("Cannot copy the database: " + iDatabaseName, e, OStorageException.class);
        }
        return this;
    }
}
public class TransactionalSharedLuceneLock { /** * Removes the lock , without committing pending changes or involving transactions . Used by Lucene * at Directory creation : we expect the lock to not exist in this case . */ private void clearLock ( ) { } }
Object previousValue = noCacheStoreCache . remove ( keyOfLock ) ; if ( previousValue != null && trace ) { log . tracef ( "Lock removed for index: %s" , indexName ) ; }
public class ProcessorConfigurationUtils { /** * Unwraps a wrapped implementation of { @ link IElementProcessor } . * This method is meant for < strong > internal < / strong > use only . * @ param processor the processor to be unwrapped . * @ return the unwrapped processor . */ public static IElementProcessor unwrap ( final IElementProcessor processor ) { } }
if ( processor == null ) { return null ; } if ( processor instanceof AbstractProcessorWrapper ) { return ( IElementProcessor ) ( ( AbstractProcessorWrapper ) processor ) . unwrap ( ) ; } return processor ;
public class OperationsApi { /** * Get users . * Get [ CfgPerson ] ( https : / / docs . genesys . com / Documentation / PSDK / latest / ConfigLayerRef / CfgPerson ) objects based on the specified filters . * @ param limit Limit the number of users the Provisioning API should return . ( optional ) * @ param offset The number of matches the Provisioning API should skip in the returned users . ( optional ) * @ param order The sort order . ( optional ) * @ param sortBy A comma - separated list of fields to sort on . Possible values are firstName , lastName , and userName . ( optional ) * @ param filterName The name of a filter to use on the results . ( optional ) * @ param filterParameters A part of the users first or last name , if you use the FirstNameOrLastNameMatches filter . ( optional ) * @ param roles Return only return users who have these Workspace Web Edition roles . The roles can be specified in a comma - separated list . Possible values are ROLE _ AGENT and ROLE _ ADMIN , ROLE _ SUPERVISOR . ( optional ) * @ param skills Return only users who have these skills . The skills can be specified in a comma - separated list . ( optional ) * @ param userEnabled Return only enabled or disabled users . ( optional ) * @ param userValid Return only valid or invalid users . ( optional ) * @ param callback The callback function called when the skills are returned asynchronously . Callback takes one parameter : Map & lt ; String , Object & lt ; results . * @ throws ProvisioningApiException if the call is unsuccessful . */ public void getUsersAsync ( Integer limit , Integer offset , String order , String sortBy , String filterName , String filterParameters , String roles , String skills , Boolean userEnabled , String userValid , AsyncCallback callback ) throws ProvisioningApiException { } }
String aioId = UUID . randomUUID ( ) . toString ( ) ; asyncCallbacks . put ( aioId , callback ) ; try { ApiAsyncSuccessResponse resp = operationsApi . getUsersAsync ( aioId , limit , offset , order , sortBy , filterName , filterParameters , roles , skills , userEnabled , userValid ) ; if ( ! resp . getStatus ( ) . getCode ( ) . equals ( 1 ) ) { throw new ProvisioningApiException ( "Error getting users. Code: " + resp . getStatus ( ) . getCode ( ) ) ; } } catch ( ApiException e ) { throw new ProvisioningApiException ( "Error getting users" , e ) ; }
public class AbcGrammar {
    /**
     * Grammar rule for the clef middle field.
     * cleff-middle ::= "middle=" base-note [octave]
     * Accepts either the short form "m=" or the long form "middle=" (case-insensitive),
     * followed by a base note and an optional octave marker.
     */
    Rule ClefMiddle() {
        return Sequence(FirstOfS(IgnoreCase("m="), IgnoreCase("middle=")), BaseNote(), OptionalS(Octave())).label(ClefMiddle);
    }
}
public class Net {
    /**
     * Join IPv6 multicast group.
     *
     * @param fd the socket's file descriptor
     * @param group the multicast group address bytes
     * @param index the network interface index
     * @param source the source address bytes for source-specific multicast
     * @throws IOException if the native join operation fails
     */
    static int join6(FileDescriptor fd, byte[] group, int index, byte[] source) throws IOException {
        // true selects the "join" branch of the shared join/drop native helper.
        return joinOrDrop6(true, fd, group, index, source);
    }
}
public class NotDimFilter { /** * There are some special cases involving null that require special casing for And and Or instead of simply taking * the complement * Example 1 : " NOT ( [ 0 , INF ) OR null ) " The inside of NOT would evaluate to null , and the complement would also * be null . However , by breaking the NOT , this statement is " NOT ( [ 0 , INF ) ) AND NOT ( null ) " , which means it should * actually evaluate to ( - INF , 0 ) . * Example 2 : " NOT ( [ 0 , INF ) AND null ) " The inside of NOT would evaluate to [ 0 , INF ) , and the complement would be * ( - INF , 0 ) . However the statement is actually " NOT ( [ 0 , INF ) ) OR NOT ( null ) " , and it should be evaluated to null . */ @ Override public RangeSet < String > getDimensionRangeSet ( String dimension ) { } }
if ( field instanceof AndDimFilter ) { List < DimFilter > fields = ( ( AndDimFilter ) field ) . getFields ( ) ; return new OrDimFilter ( Lists . transform ( fields , NotDimFilter :: new ) ) . getDimensionRangeSet ( dimension ) ; } if ( field instanceof OrDimFilter ) { List < DimFilter > fields = ( ( OrDimFilter ) field ) . getFields ( ) ; return new AndDimFilter ( Lists . transform ( fields , NotDimFilter :: new ) ) . getDimensionRangeSet ( dimension ) ; } if ( field instanceof NotDimFilter ) { return ( ( NotDimFilter ) field ) . getField ( ) . getDimensionRangeSet ( dimension ) ; } RangeSet < String > rangeSet = field . getDimensionRangeSet ( dimension ) ; return rangeSet == null ? null : rangeSet . complement ( ) ;
public class DetectorConfigurationTab { /** * Disables all unchecked detector factories and enables checked factory * detectors , leaving those not in the table unmodified . */ protected void syncUserPreferencesWithTable ( ) { } }
TableItem [ ] itemList = availableFactoriesTableViewer . getTable ( ) . getItems ( ) ; UserPreferences currentProps = getCurrentProps ( ) ; for ( int i = 0 ; i < itemList . length ; i ++ ) { DetectorFactory factory = ( DetectorFactory ) itemList [ i ] . getData ( ) ; // set enabled if defined in configuration currentProps . enableDetector ( factory , itemList [ i ] . getChecked ( ) ) ; }
public class ChatApi { /** * Send a message * Send a message to participants in the specified chat . * @ param id The ID of the chat interaction . ( required ) * @ param acceptData Request parameters . ( optional ) * @ return ApiSuccessResponse * @ throws ApiException If fail to call the API , e . g . server error or cannot deserialize the response body */ public ApiSuccessResponse sendMessage ( String id , AcceptData1 acceptData ) throws ApiException { } }
ApiResponse < ApiSuccessResponse > resp = sendMessageWithHttpInfo ( id , acceptData ) ; return resp . getData ( ) ;
public class UniverseApi { /** * Get star information ( asynchronously ) Get information on a star - - - This * route expires daily at 11:05 * @ param starId * star _ id integer ( required ) * @ param datasource * The server name you would like data from ( optional , default to * tranquility ) * @ param ifNoneMatch * ETag from a previous request . A 304 will be returned if this * matches the current ETag ( optional ) * @ param callback * The callback to be executed when the API call finishes * @ return The request call * @ throws ApiException * If fail to process the API call , e . g . serializing the request * body object */ public com . squareup . okhttp . Call getUniverseStarsStarIdAsync ( Integer starId , String datasource , String ifNoneMatch , final ApiCallback < StarResponse > callback ) throws ApiException { } }
com . squareup . okhttp . Call call = getUniverseStarsStarIdValidateBeforeCall ( starId , datasource , ifNoneMatch , callback ) ; Type localVarReturnType = new TypeToken < StarResponse > ( ) { } . getType ( ) ; apiClient . executeAsync ( call , localVarReturnType , callback ) ; return call ;
public class Version { /** * < pre > * A service with manual scaling runs continuously , allowing you to perform * complex initialization and rely on the state of its memory over time . * < / pre > * < code > . google . appengine . v1 . ManualScaling manual _ scaling = 5 ; < / code > */ public com . google . appengine . v1 . ManualScaling getManualScaling ( ) { } }
if ( scalingCase_ == 5 ) { return ( com . google . appengine . v1 . ManualScaling ) scaling_ ; } return com . google . appengine . v1 . ManualScaling . getDefaultInstance ( ) ;
public class CmsPushButton {
    /**
     * Sets the button style.<p>
     *
     * Removes the CSS classes contributed by the previous style/color before applying the
     * new ones, so styles never accumulate across calls.
     *
     * @param style the style to set
     * @param color the color to set (may be null to clear the color)
     */
    public void setButtonStyle(I_CmsButton.ButtonStyle style, I_CmsButton.ButtonColor color) {
        // First strip every CSS class the previous style added.
        if (m_buttonStyle != null) {
            for (String styleName : m_buttonStyle.getAdditionalClasses()) {
                removeStyleName(styleName);
            }
        }
        // Transparent buttons have no fixed size.
        if (style == ButtonStyle.TRANSPARENT) {
            setSize(null);
        }
        addStyleName(style.getCssClassName());
        m_buttonStyle = style;
        // Swap the color class: remove the old one, then add the new one if present.
        if (m_color != null) {
            removeStyleName(m_color.getClassName());
        }
        if (color != null) {
            addStyleName(color.getClassName());
        }
        m_color = color;
    }
}
public class MaskConverter { /** * Retrieve ( in string format ) from this field . * @ return This field as a percent string . */ public String getString ( ) { } }
String string = super . getString ( ) ; if ( ( string != null ) && ( string . length ( ) > 0 ) ) { StringBuffer sb = new StringBuffer ( string ) ; if ( sb . length ( ) > m_iUnmaskedChars ) { String strFiller = "" + FILLER ; for ( int i = 0 ; i < sb . length ( ) - m_iUnmaskedChars ; i ++ ) { sb . replace ( i , i + 1 , strFiller ) ; } string = sb . toString ( ) ; } } return string ;
public class CollectorServlet {
    /**
     * {@inheritDoc}
     *
     * Stops the collector server (if one was started) and shuts down JRobin storage
     * before delegating to the superclass.
     */
    @Override
    public void destroy() {
        LOGGER.info("collector servlet stopping");
        // collectorServer may be null if initialization never completed.
        if (collectorServer != null) {
            collectorServer.stop();
        }
        Collector.stopJRobin();
        LOGGER.info("collector servlet stopped");
        super.destroy();
    }
}
public class QueryBuilder {
    /**
     * A literal term, as in {@code WHERE k = 1}.
     * <p>This method can process any type for which there is a default Java to CQL mapping, namely:
     * primitive types ({@code Integer => int, Long => bigint, String => text, etc.}), and collections,
     * tuples, and user defined types thereof.
     * <p>A null argument will be rendered as {@code NULL}.
     * <p>For custom mappings, use {@link #literal(Object, CodecRegistry)} or
     * {@link #literal(Object, TypeCodec)}.
     *
     * @throws CodecNotFoundException if there is no default CQL mapping for the Java type of
     *     {@code value}.
     */
    @NonNull
    public static Literal literal(@Nullable Object value) {
        // Convenience overload: resolve codecs from the default registry.
        return literal(value, CodecRegistry.DEFAULT);
    }
}
public class StrategicQueues { /** * Return a StrategicBlockingQueue backed by an ArrayBlockingQueue of the * given capacity using the given QueueingStrategy . * @ param capacity the capacity of the ArrayBlockingQueue * @ param queueingStrategy the QueueingStrategy to use * @ param < V > the type of elements held in this collection */ public static < V > StrategicBlockingQueue < V > newStrategicArrayBlockingQueue ( int capacity , QueueingStrategy < V > queueingStrategy ) { } }
return new StrategicBlockingQueue < V > ( new ArrayBlockingQueue < V > ( capacity ) , queueingStrategy ) ;
public class BaseEventService {
    /**
     * {@inheritDoc}
     *
     * Dispatches the event to every registered subscriber class. Each subscriber is
     * instantiated reflectively and informed on an executor; chainable events trigger
     * onSuccess/onFailure chain links after the subscriber runs.
     *
     * @since 1.0.0
     */
    public void publish(Event event) {
        logger.debug("Dispatching event: " + event.getClass().toString());
        final ArrayList<Class<? extends Subscriber>> subscriberClasses = subscriptionMap.get(event.getClass());
        // No one subscribed to this event type: nothing to do.
        if (subscriberClasses == null) {
            logger.debug("No subscribers to inform from event: " + event.getClass().getName());
            return;
        }
        for (Class<? extends Subscriber> clazz : subscriberClasses) {
            logger.debug("Alerting subscriber " + clazz.getName());
            if (event instanceof ChainableEvent) {
                addTrackedEvent((ChainableEvent) event);
            }
            // Check to see if the Event is Unblocked. If so, use a separate executor pool from normal events
            final ExecutorService executorService = event instanceof UnblockedEvent ? dynamicExecutor : executor;
            executorService.execute(() -> {
                try (AlpineQueryManager qm = new AlpineQueryManager()) {
                    // Record the dispatch, instantiate the subscriber reflectively, and inform it.
                    final EventServiceLog eventServiceLog = qm.createEventServiceLog(clazz);
                    final Subscriber subscriber = clazz.getDeclaredConstructor().newInstance();
                    subscriber.inform(event);
                    qm.updateEventServiceLog(eventServiceLog);
                    if (event instanceof ChainableEvent) {
                        ChainableEvent chainableEvent = (ChainableEvent) event;
                        logger.debug("Calling onSuccess");
                        // Fire each success chain link, either via its event service singleton or direct dispatch.
                        for (ChainLink chainLink : chainableEvent.onSuccess()) {
                            if (chainLink.getSuccessEventService() != null) {
                                Method method = chainLink.getSuccessEventService().getMethod("getInstance");
                                IEventService es = (IEventService) method.invoke(chainLink.getSuccessEventService(), new Object[0]);
                                es.publish(chainLink.getSuccessEvent());
                            } else {
                                Event.dispatch(chainLink.getSuccessEvent());
                            }
                        }
                    }
                } catch (NoSuchMethodException | InvocationTargetException | InstantiationException | IllegalAccessException | SecurityException e) {
                    logger.error("An error occurred while informing subscriber: " + e);
                    if (event instanceof ChainableEvent) {
                        ChainableEvent chainableEvent = (ChainableEvent) event;
                        logger.debug("Calling onFailure");
                        // Mirror of the success path: fire each failure chain link.
                        for (ChainLink chainLink : chainableEvent.onFailure()) {
                            if (chainLink.getFailureEventService() != null) {
                                try {
                                    Method method = chainLink.getFailureEventService().getMethod("getInstance");
                                    IEventService es = (IEventService) method.invoke(chainLink.getFailureEventService(), new Object[0]);
                                    es.publish(chainLink.getFailureEvent());
                                } catch (NoSuchMethodException | InvocationTargetException | IllegalAccessException ex) {
                                    logger.error("Exception while calling onFailure callback", ex);
                                }
                            } else {
                                Event.dispatch(chainLink.getFailureEvent());
                            }
                        }
                    }
                }
                // Stop tracking the chainable event regardless of success or failure.
                if (event instanceof ChainableEvent) {
                    removeTrackedEvent((ChainableEvent) event);
                }
            });
        }
    }
}