signature
stringlengths
43
39.1k
implementation
stringlengths
0
450k
public class RedisInner { /** * Import data into Redis cache . * @ param resourceGroupName The name of the resource group . * @ param name The name of the Redis cache . * @ param parameters Parameters for Redis import operation . * @ throws IllegalArgumentException thrown if parameters fail the validation * @ throws CloudException thrown if the request is rejected by server * @ throws RuntimeException all other wrapped checked exceptions if the request fails to be sent */ public void importData ( String resourceGroupName , String name , ImportRDBParameters parameters ) { } }
importDataWithServiceResponseAsync ( resourceGroupName , name , parameters ) . toBlocking ( ) . last ( ) . body ( ) ;
public class JDBC4ClientConnection { /** * Executes a procedure synchronously and returns the result to the caller . The method * internally tracks execution performance . * @ param procedure * the name of the procedure to call . * @ param parameters * the list of parameters to pass to the procedure . * @ return the response sent back by the VoltDB cluster for the procedure execution . * @ throws IOException * @ throws NoConnectionsException * @ throws ProcCallException */ public ClientResponse execute ( String procedure , long timeout , TimeUnit unit , Object ... parameters ) throws NoConnectionsException , IOException , ProcCallException { } }
ClientImpl currentClient = this . getClient ( ) ; if ( unit == null ) { unit = TimeUnit . SECONDS ; } try { // If connections are lost try reconnecting . ClientResponse response = currentClient . callProcedureWithClientTimeout ( BatchTimeoutOverrideType . NO_TIMEOUT , procedure , timeout , unit , parameters ) ; return response ; } catch ( ProcCallException pce ) { throw pce ; } catch ( NoConnectionsException e ) { this . dropClient ( currentClient ) ; throw e ; }
public class ChainResolver { /** * Compacts the given chain entry by resolving every key within . * @ param entry an uncompacted heterogenous { @ link ServerStoreProxy . ChainEntry } */ public void compact ( ServerStoreProxy . ChainEntry entry ) { } }
ChainBuilder builder = new ChainBuilder ( ) ; for ( PutOperation < K , V > operation : resolveAll ( entry ) . values ( ) ) { builder = builder . add ( codec . encode ( operation ) ) ; } Chain compacted = builder . build ( ) ; if ( compacted . length ( ) < entry . length ( ) ) { entry . replaceAtHead ( compacted ) ; }
public class HDRMetadata { /** * No document node */ @ Override protected IIOMetadataNode getStandardTextNode ( ) { } }
if ( header . getSoftware ( ) != null ) { IIOMetadataNode text = new IIOMetadataNode ( "Text" ) ; IIOMetadataNode textEntry = new IIOMetadataNode ( "TextEntry" ) ; textEntry . setAttribute ( "keyword" , "Software" ) ; textEntry . setAttribute ( "value" , header . getSoftware ( ) ) ; text . appendChild ( textEntry ) ; return text ; } return null ;
public class FieldConstraintsBuilder {
    /**
     * Creates the days-of-week mapping.
     *
     * @return map from weekday names in EEE format to their 1-7 positions (MON=1 .. SUN=7)
     */
    private static Map<String, Integer> daysOfWeekMapping() {
        final Map<String, Integer> dayNumbers = new HashMap<>();
        // Week starts on Monday; values are the 1-based day positions.
        final String[] names = {"MON", "TUE", "WED", "THU", "FRI", "SAT", "SUN"};
        for (int i = 0; i < names.length; i++) {
            dayNumbers.put(names[i], i + 1);
        }
        return dayNumbers;
    }
}
public class Groundy { /** * Inserts an ArrayList < Integer > value into the mapping of this Bundle , replacing any existing * value for the given key . Either key or value may be null . * @ param key a String , or null * @ param value an ArrayList < Integer > object , or null */ public Groundy addIntegerArrayList ( String key , ArrayList < Integer > value ) { } }
mArgs . putIntegerArrayList ( key , value ) ; return this ;
public class AgentServlet { /** * Initialize the backend systems , the log handler and the restrictor . A subclass can tune * this step by overriding { @ link # createRestrictor ( Configuration ) } } and { @ link # createLogHandler ( ServletConfig , boolean ) } * @ param pServletConfig servlet configuration */ @ Override public void init ( ServletConfig pServletConfig ) throws ServletException { } }
super . init ( pServletConfig ) ; Configuration config = initConfig ( pServletConfig ) ; // Create a log handler early in the lifecycle , but not too early String logHandlerClass = config . get ( ConfigKey . LOGHANDLER_CLASS ) ; logHandler = logHandlerClass != null ? ( LogHandler ) ClassUtil . newInstance ( logHandlerClass ) : createLogHandler ( pServletConfig , Boolean . valueOf ( config . get ( ConfigKey . DEBUG ) ) ) ; // Different HTTP request handlers httpGetHandler = newGetHttpRequestHandler ( ) ; httpPostHandler = newPostHttpRequestHandler ( ) ; if ( restrictor == null ) { restrictor = createRestrictor ( config ) ; } else { logHandler . info ( "Using custom access restriction provided by " + restrictor ) ; } configMimeType = config . get ( ConfigKey . MIME_TYPE ) ; addJsr160DispatcherIfExternallyConfigured ( config ) ; backendManager = new BackendManager ( config , logHandler , restrictor ) ; requestHandler = new HttpRequestHandler ( config , backendManager , logHandler ) ; allowDnsReverseLookup = config . getAsBoolean ( ConfigKey . ALLOW_DNS_REVERSE_LOOKUP ) ; streamingEnabled = config . getAsBoolean ( ConfigKey . STREAMING ) ; initDiscoveryMulticast ( config ) ;
public class RemoveIpRoutesRequest { /** * IP address blocks that you want to remove . * @ param cidrIps * IP address blocks that you want to remove . */ public void setCidrIps ( java . util . Collection < String > cidrIps ) { } }
if ( cidrIps == null ) { this . cidrIps = null ; return ; } this . cidrIps = new com . amazonaws . internal . SdkInternalList < String > ( cidrIps ) ;
public class OtpConnection { /** * Receive a messge complete with sender and recipient information . * @ return an { @ link OtpMsg OtpMsg } containing the header information about * the sender and recipient , as well as the actual message contents . * @ exception java . io . IOException * if the connection is not active or a communication error * occurs . * @ exception OtpErlangExit * if an exit signal is received from a process on the peer * node , or if the connection is lost for any reason . * @ exception OtpAuthException * if the remote node sends a message containing an invalid * cookie . */ public OtpMsg receiveMsg ( ) throws IOException , OtpErlangExit , OtpAuthException { } }
final Object o = queue . get ( ) ; if ( o instanceof OtpMsg ) { return ( OtpMsg ) o ; } else if ( o instanceof IOException ) { throw ( IOException ) o ; } else if ( o instanceof OtpErlangExit ) { throw ( OtpErlangExit ) o ; } else if ( o instanceof OtpAuthException ) { throw ( OtpAuthException ) o ; } return null ;
public class DefaultGitHubClient { /** * Get commit statuses from the given commit status url * Retrieve the most recent status for each unique context . * See https : / / developer . github . com / v3 / repos / statuses / # list - statuses - for - a - specific - ref * and https : / / developer . github . com / v3 / repos / statuses / # get - the - combined - status - for - a - specific - ref * @ param statusUrl * @ param repo * @ return * @ throws RestClientException */ public List < CommitStatus > getCommitStatuses ( String statusUrl , GitHubRepo repo ) throws RestClientException { } }
Map < String , CommitStatus > statuses = new HashMap < > ( ) ; // decrypt password String decryptedPassword = decryptString ( repo . getPassword ( ) , settings . getKey ( ) ) ; String personalAccessToken = ( String ) repo . getOptions ( ) . get ( "personalAccessToken" ) ; String decryptedPersonalAccessToken = decryptString ( personalAccessToken , settings . getKey ( ) ) ; boolean lastPage = false ; String queryUrlPage = statusUrl ; while ( ! lastPage ) { ResponseEntity < String > response = makeRestCall ( queryUrlPage , repo . getUserId ( ) , decryptedPassword , decryptedPersonalAccessToken ) ; JSONArray jsonArray = parseAsArray ( response ) ; for ( Object item : jsonArray ) { JSONObject jsonObject = ( JSONObject ) item ; String context = str ( jsonObject , "context" ) ; if ( ( context != null ) && ! statuses . containsKey ( context ) ) { CommitStatus status = new CommitStatus ( ) ; status . setContext ( context ) ; status . setDescription ( str ( jsonObject , "description" ) ) ; status . setState ( str ( jsonObject , "state" ) ) ; statuses . put ( context , status ) ; } } if ( CollectionUtils . isEmpty ( jsonArray ) ) { lastPage = true ; } else { if ( isThisLastPage ( response ) ) { lastPage = true ; } else { lastPage = false ; queryUrlPage = getNextPageUrl ( response ) ; } } } return new ArrayList < > ( statuses . values ( ) ) ;
public class DirectoryConnectSettingsDescription { /** * The IP addresses of the AD Connector servers . * < b > NOTE : < / b > This method appends the values to the existing list ( if any ) . Use * { @ link # setConnectIps ( java . util . Collection ) } or { @ link # withConnectIps ( java . util . Collection ) } if you want to * override the existing values . * @ param connectIps * The IP addresses of the AD Connector servers . * @ return Returns a reference to this object so that method calls can be chained together . */ public DirectoryConnectSettingsDescription withConnectIps ( String ... connectIps ) { } }
if ( this . connectIps == null ) { setConnectIps ( new com . amazonaws . internal . SdkInternalList < String > ( connectIps . length ) ) ; } for ( String ele : connectIps ) { this . connectIps . add ( ele ) ; } return this ;
public class SpoonUtils {
    /**
     * Fetches the already-connected real device matching the given serial number.
     *
     * @param adb bridge listing the currently attached devices
     * @param serial serial number of the device to find
     * @return the matching device
     * @throws IllegalArgumentException when no attached device has that serial
     */
    static IDevice obtainRealDevice(AndroidDebugBridge adb, String serial) {
        // Scan the devices currently known to adb for a serial match.
        for (IDevice candidate : adb.getDevices()) {
            if (candidate.getSerialNumber().equals(serial)) {
                return candidate;
            }
        }
        throw new IllegalArgumentException("Unknown device serial: " + serial);
    }
}
public class CmsHtmlImport { /** * Add a new external link to the storage of external links . < p > * All links in this storage are later used to create entries in the external link gallery . < p > * @ param externalLink link to an external resource * @ return the complete path to the external link file , if one is created . */ public String storeExternalLink ( String externalLink ) { } }
if ( ! CmsStringUtil . isEmptyOrWhitespaceOnly ( m_linkGallery ) ) { m_externalLinks . add ( externalLink ) ; return getExternalLinkFile ( externalLink ) ; } return null ;
public class ApiOvhTelephony { /** * Get directory service code from an APE code ( principal activity of the firm code ) * REST : GET / telephony / { billingAccount } / service / { serviceName } / directory / getDirectoryServiceCode * @ param apeCode [ required ] * @ param billingAccount [ required ] The name of your billingAccount * @ param serviceName [ required ] */ public ArrayList < OvhDirectoryHeadingPJ > billingAccount_service_serviceName_directory_getDirectoryServiceCode_GET ( String billingAccount , String serviceName , String apeCode ) throws IOException { } }
String qPath = "/telephony/{billingAccount}/service/{serviceName}/directory/getDirectoryServiceCode" ; StringBuilder sb = path ( qPath , billingAccount , serviceName ) ; query ( sb , "apeCode" , apeCode ) ; String resp = exec ( qPath , "GET" , sb . toString ( ) , null ) ; return convertTo ( resp , t13 ) ;
public class CourierTemplateSpecGenerator { /** * Return exception for unrecognized schema type . */ private static IllegalStateException unrecognizedSchemaType ( ClassTemplateSpec enclosingClass , String memberName , DataSchema schema ) { } }
return new IllegalStateException ( "Unrecognized schema: " + schema + enclosingClassAndMemberNameToString ( enclosingClass , memberName ) ) ;
public class ProgrammaticWrappingProxyInstaller { /** * Wrap a package doc . * @ param source the source * @ return the wrapper . */ public PackageDoc [ ] wrap ( PackageDoc [ ] source ) { } }
if ( source == null ) { return null ; } final List < PackageDoc > list = new ArrayList < > ( ) ; for ( final PackageDoc element : source ) { if ( isIncluded ( element ) ) { list . add ( wrap ( element ) ) ; } } return Utils . toArray ( source , list ) ;
public class AsciiString { /** * Returns an { @ link AsciiString } containing the given character sequence . If the given string is already a * { @ link AsciiString } , just returns the same instance . */ public static AsciiString of ( CharSequence string ) { } }
return string . getClass ( ) == AsciiString . class ? ( AsciiString ) string : new AsciiString ( string ) ;
public class Storage { /** * Post - construction initialization links all non - dropped members of group together . Returns the primary member * of the group , other members are mirrors . */ static Storage initializeGroup ( Collection < Storage > group ) { } }
List < Storage > sorted = Ordering . natural ( ) . immutableSortedCopy ( group ) ; Storage primary = sorted . get ( 0 ) ; // After sorting , the first storage in each group is the primary . if ( ! primary . isConsistent ( ) ) { // Dropping the primary drops the entire group ( primary + mirrors ) , so ' primary ' must be a mirror that // should have been dropped but was missed due to error conditions related to eventual consistency . return null ; } for ( Storage storage : sorted ) { storage . _group = sorted ; } return primary ;
public class BitString { /** * An efficient method for exchanging data between two bit strings . Both bit strings must * be long enough that they contain the full length of the specified substring . * @ param other The bitstring with which this bitstring should swap bits . * @ param start The start position for the substrings to be exchanged . All bit * indices are big - endian , which means position 0 is the rightmost bit . * @ param length The number of contiguous bits to swap . */ public void swapSubstring ( BitString other , int start , int length ) { } }
assertValidIndex ( start ) ; other . assertValidIndex ( start ) ; int word = start / WORD_LENGTH ; int partialWordSize = ( WORD_LENGTH - start ) % WORD_LENGTH ; if ( partialWordSize > 0 ) { swapBits ( other , word , 0xFFFFFFFF << ( WORD_LENGTH - partialWordSize ) ) ; ++ word ; } int remainingBits = length - partialWordSize ; int stop = remainingBits / WORD_LENGTH ; for ( int i = word ; i < stop ; i ++ ) { int temp = data [ i ] ; data [ i ] = other . data [ i ] ; other . data [ i ] = temp ; } remainingBits %= WORD_LENGTH ; if ( remainingBits > 0 ) { swapBits ( other , word , 0xFFFFFFFF >>> ( WORD_LENGTH - remainingBits ) ) ; }
public class AuthorityKeyIdentifierExtension { /** * Return the encoded key identifier , or null if not specified . */ public byte [ ] getEncodedKeyIdentifier ( ) throws IOException { } }
if ( id != null ) { DerOutputStream derOut = new DerOutputStream ( ) ; id . encode ( derOut ) ; return derOut . toByteArray ( ) ; } return null ;
public class AbstractConsoleServlet { /** * Check access rights to the servlets path - is checking ACLs of the console path * @ param context the current request * @ return ' true ' if access granted or access check switched off */ protected boolean checkConsoleAccess ( BeanContext context ) { } }
String consolePath = getConsolePath ( context ) ; if ( StringUtils . isNotBlank ( consolePath ) ) { return context . getResolver ( ) . getResource ( consolePath ) != null ; } return true ;
public class ArangoResultConverter { /** * Build a GeoResults object with the ArangoCursor returned by query execution * @ param cursor * ArangoCursor containing query results * @ return GeoResults object with all results */ @ SuppressWarnings ( { } }
"rawtypes" , "unchecked" } ) private GeoResults < ? > buildGeoResults ( final ArangoCursor < ? > cursor ) { final List < GeoResult < ? > > list = new LinkedList < > ( ) ; cursor . forEachRemaining ( o -> { final GeoResult < ? > geoResult = buildGeoResult ( o ) ; if ( geoResult != null ) { list . add ( geoResult ) ; } } ) ; return new GeoResults ( list ) ;
public class DatabaseManagerSwing { /** * Added : ( weconsultants @ users ) Needed to aggragate counts per table in jTree */ protected int [ ] getRowCounts ( Vector inTable , Vector inSchema ) throws Exception { } }
if ( ! displayRowCounts ) { return ( null ) ; } String rowCountSelect = "SELECT COUNT(*) FROM " ; int [ ] counts ; String name ; counts = new int [ inTable . size ( ) ] ; try { Statement select = rowConn . createStatement ( ) ; for ( int i = 0 ; i < inTable . size ( ) ; i ++ ) { try { String schemaPart = ( String ) inSchema . elementAt ( i ) ; schemaPart = schemaPart == null ? "" : ( schemaPart + '.' ) ; name = schemaPart + ( String ) inTable . elementAt ( i ) ; ResultSet resultSet = select . executeQuery ( rowCountSelect + name ) ; while ( resultSet . next ( ) ) { counts [ i ] = resultSet . getInt ( 1 ) ; } } catch ( Exception e ) { System . err . println ( "Unable to get row count for table " + inSchema . elementAt ( i ) + '.' + inTable . elementAt ( i ) + ". Using value '0': " + e ) ; } } } catch ( Exception e ) { CommonSwing . errorMessage ( e ) ; } return ( counts ) ;
public class PlatformControl { /** * To be called by ServicePlatform when detected in the top - level application context . */ PlatformControl bind ( ServicePlatform p , XmlWebApplicationContext c , ClassLoader l ) { } }
_platform = p ; _root = c ; _cloader = l ; return this ;
public class AmazonKinesisAsyncClient { /** * Simplified method form for invoking the PutRecord operation . * @ see # putRecordAsync ( PutRecordRequest ) */ @ Override public java . util . concurrent . Future < PutRecordResult > putRecordAsync ( String streamName , java . nio . ByteBuffer data , String partitionKey , String sequenceNumberForOrdering ) { } }
return putRecordAsync ( new PutRecordRequest ( ) . withStreamName ( streamName ) . withData ( data ) . withPartitionKey ( partitionKey ) . withSequenceNumberForOrdering ( sequenceNumberForOrdering ) ) ;
public class JGroupsTransport { /** * Delegator * @ param logicalAddress The logical address * @ param address The address */ public void join ( org . ironjacamar . core . spi . workmanager . Address logicalAddress , org . jgroups . Address address ) { } }
super . join ( logicalAddress , address ) ;
public class RaftContext { /** * Sets the commit index . * @ param commitIndex The commit index . * @ return the previous commit index */ public long setCommitIndex ( long commitIndex ) { } }
checkArgument ( commitIndex >= 0 , "commitIndex must be positive" ) ; long previousCommitIndex = this . commitIndex ; if ( commitIndex > previousCommitIndex ) { this . commitIndex = commitIndex ; logWriter . commit ( Math . min ( commitIndex , logWriter . getLastIndex ( ) ) ) ; long configurationIndex = cluster . getConfiguration ( ) . index ( ) ; if ( configurationIndex > previousCommitIndex && configurationIndex <= commitIndex ) { cluster . commit ( ) ; } setFirstCommitIndex ( commitIndex ) ; } return previousCommitIndex ;
public class BoxApiFolder { /** * Gets a request that permanently deletes a folder from the trash * @ param id id of folder to delete from the trash * @ return request to permanently delete a folder from the trash */ public BoxRequestsFolder . DeleteTrashedFolder getDeleteTrashedFolderRequest ( String id ) { } }
BoxRequestsFolder . DeleteTrashedFolder request = new BoxRequestsFolder . DeleteTrashedFolder ( id , getTrashedFolderUrl ( id ) , mSession ) ; return request ;
public class RDFDatabaseWriter { /** * Writes a single RDF : Description node to the XML stream . * @ param account The account to write from . * @ param writer The XML stream to write into . * @ throws Exception . . . */ private void writeDescription ( Account account , XmlStreamWriter writer ) throws Exception { } }
writer . writeStartElement ( "RDF:Description" ) ; writer . writeAttribute ( "RDF:about" , account . getId ( ) ) ; writer . writeAttribute ( "NS1:name" , account . getName ( ) ) ; writer . writeAttribute ( "NS1:description" , account . getDesc ( ) ) ; if ( ! account . isFolder ( ) ) { writer . writeAttribute ( "NS1:whereLeetLB" , account . getLeetType ( ) . toRdfString ( ) ) ; writer . writeAttribute ( "NS1:leetLevelLB" , Integer . toString ( account . getLeetLevel ( ) . getLevel ( ) ) ) ; if ( account . isHmac ( ) ) writer . writeAttribute ( "NS1:hashAlgorithmLB" , account . getAlgorithm ( ) . toHmacRdfString ( ) ) ; else writer . writeAttribute ( "NS1:hashAlgorithmLB" , account . getAlgorithm ( ) . toRdfString ( ) ) ; writer . writeAttribute ( "NS1:passwordLength" , Integer . toString ( account . getLength ( ) ) ) ; writer . writeAttribute ( "NS1:usernameTB" , account . getUsername ( ) ) ; writer . writeAttribute ( "NS1:counter" , account . getModifier ( ) ) ; writer . writeAttribute ( "NS1:charset" , account . getCharacterSet ( ) ) ; writer . writeAttribute ( "NS1:prefix" , account . getPrefix ( ) ) ; writer . writeAttribute ( "NS1:suffix" , account . getSuffix ( ) ) ; writer . writeAttribute ( "NS1:autoPopulate" , "false" ) ; // TODO : make this a setting allowed in accounts // The default account contains specifiers for extracting pieces of an URL Set < Account . UrlComponents > urlComponents = account . getUrlComponents ( ) ; // only write out any of them , if atleast one is set to true if ( ! urlComponents . isEmpty ( ) ) { writer . writeAttribute ( "NS1:protocolCB" , urlComponents . contains ( Account . UrlComponents . Protocol ) ? "true" : "false" ) ; writer . writeAttribute ( "NS1:subdomainCB" , urlComponents . contains ( Account . UrlComponents . Subdomain ) ? "true" : "false" ) ; writer . writeAttribute ( "NS1:domainCB" , urlComponents . contains ( Account . UrlComponents . Domain ) ? "true" : "false" ) ; writer . 
writeAttribute ( "NS1:pathCB" , urlComponents . contains ( Account . UrlComponents . PortPathAnchorQuery ) ? "true" : "false" ) ; } // only write out urlToUse if its set , or if no url components are set . if ( urlComponents . isEmpty ( ) || ! account . getUrl ( ) . isEmpty ( ) ) writer . writeAttribute ( "NS1:urlToUse" , account . getUrl ( ) ) ; int patternCount = 0 ; for ( AccountPatternData data : account . getPatterns ( ) ) { writer . writeAttribute ( "NS1:pattern" + patternCount , data . getPattern ( ) ) ; if ( data . getType ( ) == AccountPatternType . WILDCARD ) writer . writeAttribute ( "NS1:patterntype" + patternCount , "wildcard" ) ; else writer . writeAttribute ( "NS1:patterntype" + patternCount , "regex" ) ; writer . writeAttribute ( "NS1:patternenabled" + patternCount , "true" ) ; // TODO : make this a setting allowed in pattern data writer . writeAttribute ( "NS1:patterndesc" + patternCount , data . getDesc ( ) ) ; patternCount ++ ; } } writer . writeEndElement ( ) ;
public class EncodingUtil { /** * Collections */ protected < T > void writeArray ( T [ ] values , WriteOp < T > writer ) { } }
writeList ( Arrays . asList ( values ) , writer ) ;
public class DynamicLoader { /** * Dynamically loads objects from a Properties object . * Supports the use of a dynamic initializer * ( see { @ link org . gautelis . vopn . lang . DynamicInitializer } & lt ; C & gt ; ) * The < i > assignKey < / i > parameter instructs the loader to * assign the key name to the dynamic object by calling * the method assignKey ( String key ) - if it exists . */ public void load ( Properties map , DynamicInitializer < C > di , boolean assignKey ) throws ClassNotFoundException { } }
Iterator keys = map . keySet ( ) . iterator ( ) ; while ( keys . hasNext ( ) ) { String key = ( String ) keys . next ( ) ; String _className = map . getProperty ( key ) ; String className = ( _className != null ? _className . trim ( ) : null ) ; if ( null == className || className . length ( ) == 0 ) { String info = "Misconfiguration? No class name specified for " + description + " " + key ; info += ": Check the configuration!" ; log . warn ( info ) ; continue ; } C object = load ( className , di ) ; if ( null != object ) { if ( assignKey ) { // Method name and parameters String methodName = "assignKey" ; // predefined Object [ ] parameters = { key } ; // Method call try { callMethodOn ( object , methodName , parameters ) ; } catch ( Throwable ignore ) { } } put ( key , object ) ; } }
public class GetCoreDefinitionVersionRequestMarshaller { /** * Marshall the given parameter object . */ public void marshall ( GetCoreDefinitionVersionRequest getCoreDefinitionVersionRequest , ProtocolMarshaller protocolMarshaller ) { } }
if ( getCoreDefinitionVersionRequest == null ) { throw new SdkClientException ( "Invalid argument passed to marshall(...)" ) ; } try { protocolMarshaller . marshall ( getCoreDefinitionVersionRequest . getCoreDefinitionId ( ) , COREDEFINITIONID_BINDING ) ; protocolMarshaller . marshall ( getCoreDefinitionVersionRequest . getCoreDefinitionVersionId ( ) , COREDEFINITIONVERSIONID_BINDING ) ; } catch ( Exception e ) { throw new SdkClientException ( "Unable to marshall request to JSON: " + e . getMessage ( ) , e ) ; }
public class SqlModifyBuilder { /** * Generate SQL . * @ param method * the method * @ param methodBuilder * the method builder */ public static void generateSQL ( final SQLiteModelMethod method , MethodSpec . Builder methodBuilder ) { } }
JQLChecker jqlChecker = JQLChecker . getInstance ( ) ; methodBuilder . addCode ( "\n// generate sql\n" ) ; String sql = jqlChecker . replace ( method , method . jql , new JQLReplacerListenerImpl ( method ) { @ Override public String onColumnNameToUpdate ( String columnName ) { // only entity ' s columns return currentEntity . findPropertyByName ( columnName ) . columnName ; } @ Override public String onColumnName ( String columnName ) { return currentSchema . findColumnNameByPropertyName ( method , columnName ) ; } @ Override public String onBindParameter ( String bindParameterName , boolean inStatement ) { return "?" ; } } ) ; if ( method . jql . dynamicReplace . containsKey ( JQLDynamicStatementType . DYNAMIC_WHERE ) ) { methodBuilder . addStatement ( "String _sql=String.format($S, $L)" , sql . replace ( method . jql . dynamicReplace . get ( JQLDynamicStatementType . DYNAMIC_WHERE ) , "%s" ) , "StringUtils.ifNotEmptyAppend(_sqlDynamicWhere,\" AND \")" ) ; } else { methodBuilder . addStatement ( "String _sql=$S" , sql ) ; }
public class UnmappedReads { /** * Checks and adds the read if we need to remember it for injection . * Returns true if the read was added . */ @ Override public boolean maybeAddRead ( Read read ) { } }
if ( ! isUnmappedMateOfMappedRead ( read ) ) { return false ; } final String reference = read . getNextMatePosition ( ) . getReferenceName ( ) ; String key = getReadKey ( read ) ; Map < String , ArrayList < Read > > reads = unmappedReads . get ( reference ) ; if ( reads == null ) { reads = new HashMap < String , ArrayList < Read > > ( ) ; unmappedReads . put ( reference , reads ) ; } ArrayList < Read > mates = reads . get ( key ) ; if ( mates == null ) { mates = new ArrayList < Read > ( ) ; reads . put ( key , mates ) ; } if ( getReadCount ( ) < MAX_READS ) { mates . add ( read ) ; readCount ++ ; return true ; } else { LOG . warning ( "Reached the limit of in-memory unmapped mates for injection." ) ; } return false ;
public class Type { /** * Creates a new instance of the type . * The returned instances are created using the default ( no - arg ) constructor * if this Type does not represent an array . On the contrary , if this Type * represents an array , an array object of zero - size dimensions is returned * ( like " String [ 0 ] [ 0 ] [ 0 ] " ) . * @ return an object created with the default constructor or a zero - dimensions array * @ throws InstantiationException if the type cannot be instantiated * @ throws IllegalAccessException if the type cannot be instantiated */ public Object newInstance ( ) throws InstantiationException , IllegalAccessException { } }
if ( this . arrayDimensions == 0 ) { return this . componentClass . newInstance ( ) ; } final int [ ] zeroDims = new int [ this . arrayDimensions ] ; Arrays . fill ( zeroDims , 0 ) ; return Array . newInstance ( this . componentClass , zeroDims ) ;
public class MnoHttpClient { /** * Perform a GET request on the specified endpoint * @ param url * @ return response body * @ throws ApiException * @ throws AuthenticationException */ public String get ( String url ) throws AuthenticationException , ApiException { } }
return performRequest ( url , "GET" , null , null , null ) ;
public class Pair { /** * Returns a Pair constructed from X and Y . Convenience method ; the * compiler will disambiguate the classes used for you so that you * don ' t have to write out potentially long class names . */ public static < X , Y > Pair < X , Y > makePair ( X x , Y y ) { } }
return new Pair < X , Y > ( x , y ) ;
public class EntityIntrospector { /** * Processes the entity class and any super classes that are MappedSupperClasses and returns the * fields . * @ return all fields of the entity hierarchy . */ private List < Field > getAllFields ( ) { } }
List < Field > allFields = new ArrayList < > ( ) ; Class < ? > clazz = entityClass ; boolean stop ; do { List < Field > fields = IntrospectionUtils . getPersistableFields ( clazz ) ; allFields . addAll ( fields ) ; clazz = clazz . getSuperclass ( ) ; stop = clazz == null || ! clazz . isAnnotationPresent ( MappedSuperClass . class ) ; } while ( ! stop ) ; return allFields ;
public class ExportQueue { /** * TODO maybe add for stream and iterable */ public static < K2 , V2 > ExportQueue < K2 , V2 > getInstance ( String exportQueueId , SimpleConfiguration appConfig ) { } }
FluentConfigurator opts = FluentConfigurator . load ( exportQueueId , appConfig ) ; try { return new ExportQueue < > ( opts , SimpleSerializer . getInstance ( appConfig ) ) ; } catch ( Exception e ) { // TODO throw new RuntimeException ( e ) ; }
public class ServletTask { /** * Process an HTML get or post . * @ exception ServletException From inherited class . * @ exception IOException From inherited class . */ public void doProcessOutput ( BasicServlet servlet , HttpServletRequest req , HttpServletResponse res , PrintWriter outExt , ScreenModel screen ) throws ServletException , IOException { } }
PrintWriter out = outExt ; if ( screen == null ) { if ( out != null ) out . println ( "Error: default screen not available" ) ; } else { try { // create and execute the query // if no parameters , just print the form String strDatatype = this . getProperty ( DBParams . DATATYPE ) ; // Raw data ( such as in image from the DB ) if ( strDatatype == null ) { if ( out == null ) out = servlet . getOutputStream ( res ) ; // Always ( ( ScreenModel ) screen ) . printReport ( out ) ; } else { // Raw data output ( ( ScreenModel ) screen ) . getScreenFieldView ( ) . sendData ( req , res ) ; } screen . free ( ) ; screen = null ; } catch ( DBException ex ) { if ( out != null ) { out . println ( "<hr>*** SQLException caught ***<p>" ) ; out . println ( "Message: " + ex . getMessage ( ) + "<br>" ) ; ex . printStackTrace ( out ) ; } ex . printStackTrace ( ) ; } catch ( java . lang . Exception ex ) { ex . printStackTrace ( ) ; } finally { if ( outExt == null ) if ( out != null ) out . close ( ) ; } }
public class JDBCResultSet {
/**
 * Updates the designated column with an <code>int</code> value.
 * The updater methods are used to update column values in the current row or
 * the insert row. They do not update the underlying database; instead the
 * <code>updateRow</code> or <code>insertRow</code> methods are called to
 * update the database.
 * <p>
 * HSQLDB-Specific Information: HSQLDB supports this feature.
 *
 * @param columnIndex the first column is 1, the second is 2, ...
 * @param x the new column value
 * @exception SQLException if a database access error occurs, the result set
 *            concurrency is <code>CONCUR_READ_ONLY</code> or this method is
 *            called on a closed result set
 * @exception SQLFeatureNotSupportedException if the JDBC driver does not
 *            support this method
 * @since JDK 1.2 (JDK 1.1.x developers: read the overview for JDBCResultSet)
 */
public void updateInt ( int columnIndex , int x ) throws SQLException { } }
// NOTE(review): startUpdate presumably validates the update preconditions
// (open, updatable, column in range) — confirm against its definition.
startUpdate(columnIndex);
// Buffer the value into the update statement; it is not written to the
// database until updateRow()/insertRow() (see javadoc contract above).
preparedStatement.setIntParameter(columnIndex, x);
public class AbstractDataBinder { /** * Notifies all listeners , that the data binder starts to load data asynchronously . * @ param key * The key of the data , which should be loaded , as an instance of the generic type * KeyType . The key may not be null * @ param params * An array , which contains optional parameters , as an array of the type ParamType or an * empty array , if no parameters should be used * @ return True , if loading the data should be proceeded , false otherwise */ @ SafeVarargs private final boolean notifyOnLoad ( @ NonNull final KeyType key , @ NonNull final ParamType ... params ) { } }
boolean result = true ; for ( Listener < DataType , KeyType , ViewType , ParamType > listener : listeners ) { result &= listener . onLoadData ( this , key , params ) ; } return result ;
public class JavaClasspathParser {
/**
 * Decodes one XML element of a .classpath file into a classpath entry.
 *
 * @param element the considered element
 * @param projectName the name of project containing the .classpath file
 * @param projectRootAbsoluteFullPath the path to project containing the .classpath file
 * @param unknownElements map of unknown elements, filled as a side effect when non-null
 * @return the classpath entry extracted from the considered element, or null
 *         for a relative output (K_OUTPUT) entry
 */
@SuppressWarnings({ "checkstyle:npathcomplexity", "checkstyle:cyclomaticcomplexity" })
public static IClasspathEntry elementDecode(Element element, String projectName, IPath projectRootAbsoluteFullPath,
        Map<IPath, UnknownXmlElements> unknownElements) {
    final IPath projectPath = projectRootAbsoluteFullPath;
    final NamedNodeMap attributes = element.getAttributes();
    final NodeList children = element.getChildNodes();
    // Tracks which child nodes were consumed, so the remainder can be
    // recorded as "unknown children" below.
    final boolean[] foundChildren = new boolean[children.getLength()];
    final String kindAttr = removeAttribute(ClasspathEntry.TAG_KIND, attributes);
    final String pathAttr = removeAttribute(ClasspathEntry.TAG_PATH, attributes);
    // ensure path is absolute
    IPath path = new Path(pathAttr);
    final int kind = kindFromString(kindAttr);
    if (kind != IClasspathEntry.CPE_VARIABLE && kind != IClasspathEntry.CPE_CONTAINER && !path.isAbsolute()) {
        // Leave ".."-prefixed paths untouched; anchor everything else at the project root.
        if (!(path.segmentCount() > 0 && path.segment(0).equals(ClasspathEntry.DOT_DOT))) {
            path = projectPath.append(path);
        }
    }
    // source attachment info (optional)
    IPath sourceAttachmentPath = element.hasAttribute(ClasspathEntry.TAG_SOURCEPATH)
            ? new Path(removeAttribute(ClasspathEntry.TAG_SOURCEPATH, attributes))
            : null;
    if (kind != IClasspathEntry.CPE_VARIABLE && sourceAttachmentPath != null && !sourceAttachmentPath.isAbsolute()) {
        sourceAttachmentPath = projectPath.append(sourceAttachmentPath);
    }
    final IPath sourceAttachmentRootPath = element.hasAttribute(ClasspathEntry.TAG_ROOTPATH)
            ? new Path(removeAttribute(ClasspathEntry.TAG_ROOTPATH, attributes))
            : null;
    // exported flag (optional)
    final boolean isExported = removeAttribute(ClasspathEntry.TAG_EXPORTED, attributes).equals("true"); //$NON-NLS-1$
    // inclusion patterns (optional)
    IPath[] inclusionPatterns = decodePatterns(attributes, ClasspathEntry.TAG_INCLUDING);
    if (inclusionPatterns == null) {
        inclusionPatterns = ClasspathEntry.INCLUDE_ALL;
    }
    // exclusion patterns (optional)
    IPath[] exclusionPatterns = decodePatterns(attributes, ClasspathEntry.TAG_EXCLUDING);
    if (exclusionPatterns == null) {
        exclusionPatterns = ClasspathEntry.EXCLUDE_NONE;
    }
    // access rules (optional)
    NodeList attributeList = getChildAttributes(ClasspathEntry.TAG_ACCESS_RULES, children, foundChildren);
    IAccessRule[] accessRules = decodeAccessRules(attributeList);
    // backward compatibility: derive access rules from the patterns if absent
    if (accessRules == null) {
        accessRules = getAccessRules(inclusionPatterns, exclusionPatterns);
    }
    // combine access rules (optional)
    final boolean combineAccessRestrictions = !removeAttribute(ClasspathEntry.TAG_COMBINE_ACCESS_RULES, attributes)
            .equals("false"); //$NON-NLS-1$
    // extra attributes (optional)
    attributeList = getChildAttributes(ClasspathEntry.TAG_ATTRIBUTES, children, foundChildren);
    final IClasspathAttribute[] extraAttributes = decodeExtraAttributes(attributeList);
    // custom output location
    final IPath outputLocation = element.hasAttribute(ClasspathEntry.TAG_OUTPUT)
            ? projectPath.append(removeAttribute(ClasspathEntry.TAG_OUTPUT, attributes))
            : null;
    String[] unknownAttributes = null;
    ArrayList<String> unknownChildren = null;
    if (unknownElements != null) {
        // unknown attributes: stored as alternating name/value pairs
        final int unknownAttributeLength = attributes.getLength();
        if (unknownAttributeLength != 0) {
            unknownAttributes = new String[unknownAttributeLength * 2];
            for (int i = 0; i < unknownAttributeLength; i++) {
                final Node attribute = attributes.item(i);
                unknownAttributes[i * 2] = attribute.getNodeName();
                unknownAttributes[i * 2 + 1] = attribute.getNodeValue();
            }
        }
        // unknown children: any element child not consumed above
        for (int i = 0, length = foundChildren.length; i < length; i++) {
            if (!foundChildren[i]) {
                final Node node = children.item(i);
                if (node.getNodeType() != Node.ELEMENT_NODE) {
                    continue;
                }
                if (unknownChildren == null) {
                    unknownChildren = new ArrayList<>();
                }
                final StringBuffer buffer = new StringBuffer();
                decodeUnknownNode(node, buffer);
                unknownChildren.add(buffer.toString());
            }
        }
    }
    // recreate the CP entry
    IClasspathEntry entry = null;
    switch (kind) {
    case IClasspathEntry.CPE_PROJECT:
        /*
         * IPackageFragmentRoot.K_SOURCE, IClasspathEntry.CPE_PROJECT, path, ClasspathEntry.INCLUDE_ALL, // inclusion patterns
         * ClasspathEntry.EXCLUDE_NONE, // exclusion patterns null, // source attachment null, // source attachment root null, // specific output
         * folder
         */
        entry = new ClasspathEntry(IPackageFragmentRoot.K_SOURCE, IClasspathEntry.CPE_PROJECT, path,
                ClasspathEntry.INCLUDE_ALL, ClasspathEntry.EXCLUDE_NONE, null, null, null, isExported, accessRules,
                combineAccessRestrictions, extraAttributes);
        break;
    case IClasspathEntry.CPE_LIBRARY:
        entry = JavaCore.newLibraryEntry(path, sourceAttachmentPath, sourceAttachmentRootPath, accessRules,
                extraAttributes, isExported);
        break;
    case IClasspathEntry.CPE_SOURCE:
        // must be an entry in this project or specify another project
        final String projSegment = path.segment(0);
        if (projSegment != null && projSegment.equals(projectName)) {
            // this project
            entry = JavaCore.newSourceEntry(path, inclusionPatterns, exclusionPatterns, outputLocation, extraAttributes);
        } else {
            if (path.segmentCount() == 1) {
                // another project
                entry = JavaCore.newProjectEntry(path, accessRules, combineAccessRestrictions, extraAttributes,
                        isExported);
            } else {
                // an invalid source folder
                entry = JavaCore.newSourceEntry(path, inclusionPatterns, exclusionPatterns, outputLocation, extraAttributes);
            }
        }
        break;
    case IClasspathEntry.CPE_VARIABLE:
        entry = JavaCore.newVariableEntry(path, sourceAttachmentPath, sourceAttachmentRootPath, accessRules,
                extraAttributes, isExported);
        break;
    case IClasspathEntry.CPE_CONTAINER:
        entry = JavaCore.newContainerEntry(path, accessRules, extraAttributes, isExported);
        break;
    case ClasspathEntry.K_OUTPUT:
        // Output entries must be absolute; otherwise the element is dropped.
        if (!path.isAbsolute()) {
            return null;
        }
        /*
         * ClasspathEntry.EXCLUDE_NONE, null, // source attachment null, // source attachment root null, // custom output location false, null, //
         * no access rules false, // no accessible files to combine
         */
        entry = new ClasspathEntry(ClasspathEntry.K_OUTPUT, IClasspathEntry.CPE_LIBRARY, path,
                ClasspathEntry.INCLUDE_ALL, ClasspathEntry.EXCLUDE_NONE, null, null, null, false, null, false,
                ClasspathEntry.NO_EXTRA_ATTRIBUTES);
        break;
    default:
        throw new AssertionFailedException(Messages.bind(Messages.classpath_unknownKind, kindAttr));
    }
    // Record any leftover attributes/children so they can be preserved on re-save.
    if (unknownAttributes != null || unknownChildren != null) {
        final UnknownXmlElements unknownXmlElements = new UnknownXmlElements();
        unknownXmlElements.attributes = unknownAttributes;
        unknownXmlElements.children = unknownChildren;
        if (unknownElements != null) {
            unknownElements.put(path, unknownXmlElements);
        }
    }
    return entry;
public class ZooKeeperMasterModel { /** * Given a jobId and host , returns the N most recent events in its history on that host in the * cluster . */ @ Override public List < TaskStatusEvent > getJobHistory ( final JobId jobId , final String host ) throws JobDoesNotExistException { } }
final Job descriptor = getJob ( jobId ) ; if ( descriptor == null ) { throw new JobDoesNotExistException ( jobId ) ; } final ZooKeeperClient client = provider . get ( "getJobHistory" ) ; final List < String > hosts ; try { hosts = ( ! isNullOrEmpty ( host ) ) ? singletonList ( host ) : client . getChildren ( Paths . historyJobHosts ( jobId ) ) ; } catch ( NoNodeException e ) { return emptyList ( ) ; } catch ( KeeperException e ) { throw new RuntimeException ( e ) ; } final List < TaskStatusEvent > jsEvents = Lists . newArrayList ( ) ; for ( final String h : hosts ) { final List < String > events ; try { events = client . getChildren ( Paths . historyJobHostEvents ( jobId , h ) ) ; } catch ( NoNodeException e ) { continue ; } catch ( KeeperException e ) { throw new RuntimeException ( e ) ; } for ( final String event : events ) { try { final byte [ ] data = client . getData ( Paths . historyJobHostEventsTimestamp ( jobId , h , Long . valueOf ( event ) ) ) ; final TaskStatus status = Json . read ( data , TaskStatus . class ) ; jsEvents . add ( new TaskStatusEvent ( status , Long . valueOf ( event ) , h ) ) ; } catch ( NoNodeException e ) { // ignore , it went away before we read it } catch ( KeeperException | IOException e ) { throw new RuntimeException ( e ) ; } } } return Ordering . from ( EVENT_COMPARATOR ) . sortedCopy ( jsEvents ) ;
public class CollectionUtils {
    /**
     * Unboxes a list of {@link Character} wrappers into a primitive char array.
     *
     * @param input the boxed characters to convert; elements must not be null
     * @return a new {@code char[]} holding the list's values in order
     */
    public static char[] asCharacterTypeArray(List<Character> input) {
        final char[] unboxed = new char[input.size()];
        int slot = 0;
        for (final Character value : input) {
            unboxed[slot++] = value;
        }
        return unboxed;
    }
}
public class Ifc2x3tc1FactoryImpl { /** * < ! - - begin - user - doc - - > * < ! - - end - user - doc - - > * @ generated */ public String convertIfcDimensionExtentUsageToString ( EDataType eDataType , Object instanceValue ) { } }
return instanceValue == null ? null : instanceValue . toString ( ) ;
public class TaskManager {
    /**
     * Checks whether the given strings describe existing directories that are
     * writable, raising an exception on the first one that is not.
     *
     * @param tempDirs paths that must each name a writable, existing directory
     * @throws Exception if any entry is null, missing, not a directory, or not writable
     */
    private static final void checkTempDirs(final String[] tempDirs) throws Exception {
        for (int idx = 0; idx < tempDirs.length; ++idx) {
            final String dirName = tempDirs[idx];
            if (dirName == null) {
                // 1-based index in the message for the operator's benefit.
                throw new Exception("Temporary file directory #" + (idx + 1) + " is null.");
            }
            final File candidate = new File(dirName);
            if (!candidate.exists()) {
                throw new Exception("Temporary file directory '" + candidate.getAbsolutePath() + "' does not exist.");
            }
            if (!candidate.isDirectory()) {
                throw new Exception("Temporary file directory '" + candidate.getAbsolutePath() + "' is not a directory.");
            }
            if (!candidate.canWrite()) {
                throw new Exception("Temporary file directory '" + candidate.getAbsolutePath() + "' is not writable.");
            }
        }
    }
}
public class CmsDriverManager { /** * Undelete the resource . < p > * @ param dbc the current database context * @ param resource the name of the resource to apply this operation to * @ throws CmsException if something goes wrong * @ see CmsObject # undeleteResource ( String , boolean ) * @ see I _ CmsResourceType # undelete ( CmsObject , CmsSecurityManager , CmsResource , boolean ) */ public void undelete ( CmsDbContext dbc , CmsResource resource ) throws CmsException { } }
if ( ! resource . getState ( ) . isDeleted ( ) ) { throw new CmsVfsException ( Messages . get ( ) . container ( Messages . ERR_UNDELETE_FOR_RESOURCE_DELETED_1 , dbc . removeSiteRoot ( resource . getRootPath ( ) ) ) ) ; } // set the state to changed resource . setState ( CmsResourceState . STATE_CHANGED ) ; // perform the changes updateState ( dbc , resource , false ) ; // log it log ( dbc , new CmsLogEntry ( dbc , resource . getStructureId ( ) , CmsLogEntryType . RESOURCE_UNDELETED , new String [ ] { resource . getRootPath ( ) } ) , false ) ; // clear the cache m_monitor . clearResourceCache ( ) ; // fire change event Map < String , Object > data = new HashMap < String , Object > ( 2 ) ; data . put ( I_CmsEventListener . KEY_RESOURCE , resource ) ; data . put ( I_CmsEventListener . KEY_CHANGE , new Integer ( CHANGED_RESOURCE ) ) ; OpenCms . fireCmsEvent ( new CmsEvent ( I_CmsEventListener . EVENT_RESOURCE_MODIFIED , data ) ) ;
public class HashUtilities {
    /**
     * This method converts a byte array into a string converting each byte into
     * a 2-digit hex representation and appending them all together.
     *
     * @param byteArray is the array of bytes to be converted; must not be null
     * @return A {@link String} is returned representing the byte array
     * @throws IllegalArgumentException if {@code byteArray} is null
     */
    private static String convertByteArrayToString(byte[] byteArray) {
        if (byteArray == null) {
            throw new IllegalArgumentException("Byte array must not be null!");
        }
        // StringBuilder instead of the synchronized StringBuffer (local use only),
        // presized to the exact output length of two hex digits per byte.
        StringBuilder hexString = new StringBuilder(byteArray.length * 2);
        for (int i = 0; i < byteArray.length; i++) {
            // Mask to treat the byte as unsigned before formatting.
            int digit = 0xFF & byteArray[i];
            String hexDigits = Integer.toHexString(digit);
            if (hexDigits.length() < 2) {
                hexString.append("0");
            }
            hexString.append(hexDigits);
        }
        return hexString.toString();
    }
}
public class JsQueue { /** * 673411 - start */ public QueuedMessage [ ] getQueuedMessages ( java . lang . Integer fromIndexInteger , java . lang . Integer toIndexInteger , java . lang . Integer totalMessagesPerpageInteger ) throws Exception { } }
int fromIndex = fromIndexInteger . intValue ( ) ; int toIndex = toIndexInteger . intValue ( ) ; int totalMessagesPerpage = totalMessagesPerpageInteger . intValue ( ) ; if ( TraceComponent . isAnyTracingEnabled ( ) && tc . isEntryEnabled ( ) ) SibTr . entry ( tc , "getQueuedMessages fromIndex=" + fromIndex + " toIndex= " + toIndex + " totalMsgs= " + totalMessagesPerpage ) ; List list = new ArrayList ( ) ; Iterator iter = _c . getQueuedMessageIterator ( fromIndex , toIndex , totalMessagesPerpage ) ; // 673411 while ( iter != null && iter . hasNext ( ) ) { SIMPQueuedMessageControllable o = ( SIMPQueuedMessageControllable ) iter . next ( ) ; list . add ( o ) ; } List resultList = new ArrayList ( ) ; iter = list . iterator ( ) ; int i = 0 ; while ( iter . hasNext ( ) ) { Object o = iter . next ( ) ; QueuedMessage qm = SIBMBeanResultFactory . createSIBQueuedMessage ( ( SIMPQueuedMessageControllable ) o ) ; resultList . add ( qm ) ; } QueuedMessage [ ] retValue = ( QueuedMessage [ ] ) resultList . toArray ( new QueuedMessage [ 0 ] ) ; if ( TraceComponent . isAnyTracingEnabled ( ) && tc . isEntryEnabled ( ) ) SibTr . exit ( tc , "getQueuedMessagesfromIndex=" + fromIndex + " toIndex= " + toIndex + " totalMsgs= " + totalMessagesPerpage , retValue ) ; return retValue ;
public class AbstractControllerServer { /** * @ param fieldNumber * @ param value * @ throws CouldNotPerformException */ protected final void setDataField ( int fieldNumber , Object value ) throws CouldNotPerformException { } }
try { try { dataBuilderWriteLock . lock ( ) ; Descriptors . FieldDescriptor findFieldByName = dataBuilder . getDescriptorForType ( ) . findFieldByNumber ( fieldNumber ) ; if ( findFieldByName == null ) { throw new NotAvailableException ( "Field[" + fieldNumber + "] does not exist for type " + dataBuilder . getClass ( ) . getName ( ) ) ; } dataBuilder . setField ( findFieldByName , value ) ; } finally { dataBuilderWriteLock . unlock ( ) ; } } catch ( CouldNotPerformException | NullPointerException ex ) { throw new CouldNotPerformException ( "Could not set field [" + fieldNumber + "=" + value + "] for " + this , ex ) ; }
public class ReportUtil { /** * Get report version from input stream to read report file * @ param is * input stream * @ return report version */ public static String getVersion ( InputStream is ) { } }
try { String text = readAsString ( is ) ; return getVersionFromText ( text ) ; } catch ( IOException e ) { LOG . error ( e . getMessage ( ) , e ) ; return null ; }
public class IdemixIssuerPublicKey {
/**
 * Checks whether the issuer public key is well-formed and its zero-knowledge
 * proof of knowledge of the secret key verifies.
 *
 * @return true iff valid
 */
public boolean check ( ) { } }
// check formalities of IdemixIssuerPublicKey:
// all mandatory components present, BarG1 not the identity element, and at
// least one H-attribute generator per declared attribute name.
if (AttributeNames == null || Hsk == null || HRand == null || HAttrs == null
        || BarG1 == null || BarG1.is_infinity() || BarG2 == null
        || HAttrs.length < AttributeNames.length) {
    return false;
}
for (int i = 0; i < AttributeNames.length; i++) {
    if (HAttrs[i] == null) {
        return false;
    }
}
// check proofs: recompute the commitments t1 (in G2) and t2 (in G1) from the
// Schnorr response ProofS and challenge ProofC
// (t = g^s * pub^{-c}, the standard verification equation).
ECP2 t1 = IdemixUtils.genG2.mul(ProofS);
ECP t2 = BarG1.mul(ProofS);
t1.add(W.mul(BIG.modneg(ProofC, IdemixUtils.GROUP_ORDER)));
t2.add(BarG2.mul(BIG.modneg(ProofC, IdemixUtils.GROUP_ORDER)));
// Generating proofData that will contain 3 elements in G1 (of size 2*FIELD_BYTES+1)
// and 3 elements in G2 (of size 4*FIELD_BYTES)
byte[] proofData = new byte[0];
proofData = IdemixUtils.append(proofData, IdemixUtils.ecpToBytes(t1));
proofData = IdemixUtils.append(proofData, IdemixUtils.ecpToBytes(t2));
proofData = IdemixUtils.append(proofData, IdemixUtils.ecpToBytes(IdemixUtils.genG2));
proofData = IdemixUtils.append(proofData, IdemixUtils.ecpToBytes(BarG1));
proofData = IdemixUtils.append(proofData, IdemixUtils.ecpToBytes(W));
proofData = IdemixUtils.append(proofData, IdemixUtils.ecpToBytes(BarG2));
// Hash proofData to hproofdata and compare with proofC: the proof verifies
// iff the recomputed Fiat-Shamir challenge equals the stored one.
return Arrays.equals(IdemixUtils.bigToBytes(IdemixUtils.hashModOrder(proofData)),
        IdemixUtils.bigToBytes(ProofC));
public class GeometryFactory { /** * Create a new { @ link Polygon } from a { @ link Bbox } . * @ param bbox * Bounding box to convert into a { @ link Polygon } . * @ return Returns a { @ link Polygon } object . */ public Polygon createPolygon ( Bbox bbox ) { } }
Coordinate tl = new Coordinate ( bbox . getX ( ) , bbox . getY ( ) ) ; Coordinate tr = new Coordinate ( bbox . getX ( ) + bbox . getWidth ( ) , bbox . getY ( ) ) ; Coordinate br = new Coordinate ( bbox . getX ( ) + bbox . getWidth ( ) , bbox . getY ( ) + bbox . getHeight ( ) ) ; Coordinate bl = new Coordinate ( bbox . getX ( ) , bbox . getY ( ) + bbox . getHeight ( ) ) ; return new Polygon ( srid , precision , new LinearRing ( srid , precision , new Coordinate [ ] { tl , tr , br , bl , tl } ) , null ) ;
public class QuickSelectSketch { /** * Converts the current state of the sketch into a compact sketch * @ return compact sketch */ @ SuppressWarnings ( "unchecked" ) public CompactSketch < S > compact ( ) { } }
if ( getRetainedEntries ( ) == 0 ) { return new CompactSketch < > ( null , null , theta_ , isEmpty_ ) ; } final long [ ] keys = new long [ getRetainedEntries ( ) ] ; final S [ ] summaries = ( S [ ] ) Array . newInstance ( summaries_ . getClass ( ) . getComponentType ( ) , getRetainedEntries ( ) ) ; int i = 0 ; for ( int j = 0 ; j < keys_ . length ; j ++ ) { if ( summaries_ [ j ] != null ) { keys [ i ] = keys_ [ j ] ; summaries [ i ] = ( S ) summaries_ [ j ] . copy ( ) ; i ++ ; } } return new CompactSketch < > ( keys , summaries , theta_ , isEmpty_ ) ;
public class DNAToRNATranslator { /** * Overloaded local version which delegates to an optional translator * when told to ( specified during construction ) . * @ param originalSequence The DNA sequence to translate * @ return The translated single sequence */ @ Override public List < Sequence < NucleotideCompound > > createSequences ( Sequence < NucleotideCompound > originalSequence ) { } }
if ( shortCutTranslation ) { List < Sequence < NucleotideCompound > > result = new ArrayList < Sequence < NucleotideCompound > > ( 1 ) ; result . add ( wrapToRna ( originalSequence ) ) ; return result ; } else { return super . createSequences ( originalSequence ) ; }
public class BaseMessageTransport { /** * Initialize the RecordOwner . * @ param parentSessionObject Parent that created this session object . * @ param record Main record for this session ( opt ) . * @ param objectID ObjectID of the object that this SessionObject represents ( usually a URL or bookmark ) . */ public void init ( RecordOwnerParent parent , Rec recordMain , Map < String , Object > properties ) { } }
super . init ( parent , recordMain , properties ) ; if ( properties != null ) m_propTransport = properties ; else m_propTransport = this . getTransportProperties ( ) ;
public class SystemApi { /** * Get config . json file . * Returns config . json file . * @ return ApiResponse & lt ; DynconfigGetConfigResponse & gt ; * @ throws ApiException If fail to call the API , e . g . server error or cannot deserialize the response body */ public ApiResponse < DynconfigGetConfigResponse > dynconfigGetConfigWithHttpInfo ( ) throws ApiException { } }
com . squareup . okhttp . Call call = dynconfigGetConfigValidateBeforeCall ( null , null ) ; Type localVarReturnType = new TypeToken < DynconfigGetConfigResponse > ( ) { } . getType ( ) ; return apiClient . execute ( call , localVarReturnType ) ;
public class OptionsCheckForUpdatesPanel {
/**
 * This method initializes panelMisc.
 * Lazily builds the options panel with four titled sections: ZAP release
 * download, add-on updates, new add-on reporting, and directories.
 *
 * @return javax.swing.JPanel
 */
private JPanel getPanelMisc ( ) { } }
if (panelMisc == null) {
    panelMisc = new JPanel();
    panelMisc.setLayout(new GridBagLayout());
    // --- ZAP release section ---
    JPanel zapPanel = new JPanel();
    zapPanel.setLayout(new GridBagLayout());
    zapPanel.setBorder(BorderFactory.createTitledBorder(null,
            Constant.messages.getString("cfu.options.zap.border"),
            TitledBorder.DEFAULT_JUSTIFICATION, javax.swing.border.TitledBorder.DEFAULT_POSITION,
            FontUtils.getFont(FontUtils.Size.standard), java.awt.Color.black));
    zapPanel.add(getChkDownloadNewRelease(), LayoutHelper.getGBC(0, 1, 1, 1.0D));
    // --- add-on updates section ---
    JPanel updPanel = new JPanel();
    updPanel.setLayout(new GridBagLayout());
    updPanel.setBorder(BorderFactory.createTitledBorder(null,
            Constant.messages.getString("cfu.options.updates.border"),
            TitledBorder.DEFAULT_JUSTIFICATION, javax.swing.border.TitledBorder.DEFAULT_POSITION,
            FontUtils.getFont(FontUtils.Size.standard), java.awt.Color.black));
    updPanel.add(getChkCheckAddonUpdates(), LayoutHelper.getGBC(0, 0, 1, 1.0D));
    updPanel.add(getChkInstallAddonUpdates(), LayoutHelper.getGBC(0, 1, 1, 1.0D));
    updPanel.add(getChkInstallScannerRules(), LayoutHelper.getGBC(0, 2, 1, 1.0D));
    // --- new add-on reporting section ---
    JPanel newPanel = new JPanel();
    newPanel.setLayout(new GridBagLayout());
    newPanel.setBorder(BorderFactory.createTitledBorder(null,
            Constant.messages.getString("cfu.options.new.border"),
            TitledBorder.DEFAULT_JUSTIFICATION, javax.swing.border.TitledBorder.DEFAULT_POSITION,
            FontUtils.getFont(FontUtils.Size.standard), java.awt.Color.black));
    newPanel.add(getChkReportReleaseAddons(), LayoutHelper.getGBC(0, 0, 1, 1.0D));
    newPanel.add(getChkReportBetaAddons(), LayoutHelper.getGBC(0, 1, 1, 1.0D));
    newPanel.add(getChkReportAlphaAddons(), LayoutHelper.getGBC(0, 2, 1, 1.0D));
    // --- directories section (script dirs table + download directory picker) ---
    JPanel dirsPanel = new JPanel();
    dirsPanel.setLayout(new GridBagLayout());
    dirsPanel.setBorder(BorderFactory.createTitledBorder(null,
            Constant.messages.getString("cfu.options.dir.border"),
            TitledBorder.DEFAULT_JUSTIFICATION, javax.swing.border.TitledBorder.DEFAULT_POSITION,
            FontUtils.getFont(FontUtils.Size.standard), java.awt.Color.black));
    dirsPanel.add(new CfuDirsOptionsPanel(getScriptDirModel()), LayoutHelper.getGBC(0, 0, 2, 1.0D, 1.0D));
    JLabel downloadDirLabel = new JLabel(Constant.messages.getString("cfu.options.downloaddir.label"));
    downloadDirLabel.setLabelFor(getDownloadDirCombo());
    dirsPanel.add(downloadDirLabel, LayoutHelper.getGBC(0, 1, 1, 0.5D));
    dirsPanel.add(getDownloadDirCombo(), LayoutHelper.getGBC(1, 1, 1, 0.5D));
    // Assemble the sections top-to-bottom; the dirs panel takes the extra height.
    panelMisc.add(getChkCheckOnStart(), LayoutHelper.getGBC(0, 0, 1, 1.0D));
    panelMisc.add(zapPanel, LayoutHelper.getGBC(0, 1, 1, 1.0D));
    panelMisc.add(updPanel, LayoutHelper.getGBC(0, 2, 1, 1.0D));
    panelMisc.add(newPanel, LayoutHelper.getGBC(0, 3, 1, 1.0D));
    panelMisc.add(dirsPanel, LayoutHelper.getGBC(0, 4, 1, 1.0D, 1.0D));
}
return panelMisc;
public class ResourcePattern {
/**
 * Matches the given resource URI against the pattern: first the protocol,
 * host and port components (each optional, with "*" as wildcard), then the
 * path against each configured path pattern.
 *
 * @param resource the URI specifying the resource to match
 * @return -1 if the resource does not match, else the number
 *         of prefix segments (which may be 0)
 */
@SuppressWarnings({ "PMD.CyclomaticComplexity", "PMD.NPathComplexity",
    "PMD.CollapsibleIfStatements", "PMD.DataflowAnomalyAnalysis" })
public int matches(URI resource) {
    // Protocol filter: a comma-separated list, "*" matches anything.
    if (protocol != null && !protocol.equals("*")) {
        if (resource.getScheme() == null) {
            return -1;
        }
        if (Arrays.stream(protocol.split(","))
                .noneMatch(proto -> proto.equals(resource.getScheme()))) {
            return -1;
        }
    }
    // Host filter (exact match unless wildcard).
    if (host != null && !host.equals("*")) {
        if (resource.getHost() == null || !resource.getHost().equals(host)) {
            return -1;
        }
    }
    // Port filter (numeric comparison unless wildcard).
    if (port != null && !port.equals("*")) {
        if (Integer.parseInt(port) != resource.getPort()) {
            return -1;
        }
    }
    // Split the request path into segments, dropping the leading empty segment.
    String[] reqElements = PathSpliterator.stream(resource.getPath())
            .skip(1).toArray(size -> new String[size]);
    String[] reqElementsPlus = null; // Created lazily
    for (int pathIdx = 0; pathIdx < pathPatternElements.length; pathIdx++) {
        String[] pathPattern = pathPatternElements[pathIdx];
        if (prefixSegs[pathIdx] == pathPattern.length - 1 && lastIsEmpty(pathPattern)) {
            // Special case, pattern ends with vertical bar: match against the
            // request path with a trailing empty segment appended (built once).
            if (reqElementsPlus == null) {
                reqElementsPlus = reqElements;
                if (!lastIsEmpty(reqElementsPlus)) {
                    reqElementsPlus = Arrays.copyOf(reqElementsPlus, reqElementsPlus.length + 1);
                    reqElementsPlus[reqElementsPlus.length - 1] = "";
                }
            }
            if (matchPath(pathPattern, reqElementsPlus)) {
                return prefixSegs[pathIdx];
            }
        } else {
            if (matchPath(pathPattern, reqElements)) {
                return prefixSegs[pathIdx];
            }
        }
    }
    return -1;
public class AggregatorExtension { /** * / * ( non - Javadoc ) * @ see com . ibm . jaggr . service . IAggregator . ILoadedExtension # getExtensionPointId ( ) */ @ Override public String getExtensionPointId ( ) { } }
final String sourceMethod = "getExtensionPointId" ; // $ NON - NLS - 1 $ boolean isTraceLogging = log . isLoggable ( Level . FINER ) ; if ( isTraceLogging ) { log . entering ( AggregatorExtension . class . getName ( ) , sourceMethod ) ; log . exiting ( AggregatorExtension . class . getName ( ) , sourceMethod , extensionPointId ) ; } return extensionPointId ;
public class Flowable { /** * Returns a Flowable that emits the item found at a specified index in a sequence of emissions from * this Flowable , or a default item if that index is out of range . * < img width = " 640 " height = " 310 " src = " https : / / raw . github . com / wiki / ReactiveX / RxJava / images / rx - operators / elementAtOrDefault . png " alt = " " > * < dl > * < dt > < b > Backpressure : < / b > < / dt > * < dd > The operator honors backpressure from downstream and consumes the source { @ code Publisher } in an unbounded manner * ( i . e . , no backpressure applied to it ) . < / dd > * < dt > < b > Scheduler : < / b > < / dt > * < dd > { @ code elementAt } does not operate by default on a particular { @ link Scheduler } . < / dd > * < / dl > * @ param index * the zero - based index of the item to retrieve * @ param defaultItem * the default item * @ return a Flowable that emits the item at the specified position in the sequence emitted by the source * Publisher , or the default item if that index is outside the bounds of the source sequence * @ throws IndexOutOfBoundsException * if { @ code index } is less than 0 * @ see < a href = " http : / / reactivex . io / documentation / operators / elementat . html " > ReactiveX operators documentation : ElementAt < / a > */ @ CheckReturnValue @ BackpressureSupport ( BackpressureKind . UNBOUNDED_IN ) @ SchedulerSupport ( SchedulerSupport . NONE ) public final Single < T > elementAt ( long index , T defaultItem ) { } }
if ( index < 0 ) { throw new IndexOutOfBoundsException ( "index >= 0 required but it was " + index ) ; } ObjectHelper . requireNonNull ( defaultItem , "defaultItem is null" ) ; return RxJavaPlugins . onAssembly ( new FlowableElementAtSingle < T > ( this , index , defaultItem ) ) ;
public class PathWrapper { /** * Calculates the 2D bounding box of this route */ public BBox calcBBox2D ( ) { } }
check ( "calcRouteBBox" ) ; BBox bounds = BBox . createInverse ( false ) ; for ( int i = 0 ; i < pointList . getSize ( ) ; i ++ ) { bounds . update ( pointList . getLatitude ( i ) , pointList . getLongitude ( i ) ) ; } return bounds ;
public class DataSetAssertion { /** * Get information for the assertion . * @ param type Type of iterator * @ return An iterator for the specified data . */ Iterator < Row > data ( IteratorType type ) { } }
Iterator < Row > itr ; switch ( type ) { case EXPECTED_DATA : itr = data . getRows ( ) . iterator ( ) ; break ; case ERRORS_EXPECTED : itr = delta . deleted ( ) ; break ; case ERRORS_ACTUAL : itr = delta . inserted ( ) ; break ; default : throw new InternalErrorException ( "Unexpected case!" ) ; } return itr ;
public class KamDialect {
/**
 * {@inheritDoc}
 * Delegates to the wrapped KAM and re-wraps the returned nodes so they carry
 * this dialect.
 */
@Override
public Set < KamNode > getAdjacentNodes ( KamNode kamNode ) { } }
// Delegate, then wrap each adjacent node in this dialect's node type.
return wrapNodes(kam.getAdjacentNodes(kamNode));
public class StatisticsEditsVisitor { /** * Increment the op code counter * @ param opCode opCode for which to increment count */ private void incrementOpCodeCount ( Byte opCode ) { } }
if ( ! opCodeCount . containsKey ( opCode ) ) { opCodeCount . put ( opCode , 0L ) ; } Long newValue = opCodeCount . get ( opCode ) + 1 ; opCodeCount . put ( opCode , newValue ) ;
public class ZRTP {
    /**
     * Body of the ZRTP session thread.
     *
     * <p>On first entry ({@code started == false}) this resets all negotiation state,
     * sends the initial Hello message, then blocks in a wait/notify loop processing
     * queued protocol messages until {@code completed} becomes true, at which point
     * the session is ended.</p>
     */
    private void runSession() {
        if (!started) {
            logString("Thread Starting");
            completed = false;
            // Seed both our local sequence counter and the RTP stack with the same start value.
            seqNum = getStartSeqNum();
            rtpStack.setNextZrtpSequenceNumber(getStartSeqNum());
            state = ZRTP_STATE_INACTIVE;
            initiator = false;
            // Reset all negotiated-algorithm state to pre-negotiation defaults.
            hashMode = HashType.UNDEFINED;
            dhMode = KeyAgreementType.DH3K;
            sasMode = SasType.UNDEFINED;
            // Clear everything learned about the far end from a previous run.
            farEndZID = null;
            farEndH0 = null;
            farEndClientID = "";
            isLegacyClient = false;
            // farEndH1 = null;
            // farEndH2 = null;
            // farEndH3 = null;
            farEndZID = null; // NOTE(review): farEndZID is reset twice; the second assignment is redundant
            // Drop any cached protocol messages from a previous handshake.
            dhPart1Msg = null;
            dhPart2Msg = null;
            rxHelloMsg = txHelloMsg = commitMsg = null;
            msgConfirm1TX = msgConfirm2TX = null;
            msgConfirm1RX = msgConfirm2RX = null;
            msgErrorTX = null;
            try {
                // TODO: create after algorithm negotiation
                dhSuite.setAlgorithm(KeyAgreementType.DH3K);
                // Initialize the retransmission timer interval
                timerInterval = T1_INITIAL_INTERVAL;
                sendHello();
                started = true;
            } catch (Throwable e) {
                // If the initial Hello cannot be sent, mark the session completed so the
                // loop below is skipped and the thread winds down.
                logError("Exception sending initial Hello message: " + e.toString());
                e.printStackTrace();
                completed = true;
            }
            // Main message loop: sleep on the lock until another thread queues a
            // message (and notifies), then drain the queue; repeat until completed.
            while (!completed) {
                synchronized (lock) {
                    try {
                        lock.wait();
                    } catch (Throwable e) {
                        logString("Thread Interrupted E:" + e);
                    }
                }
                processQueuedMessages();
            }
            endSession();
            logString("Thread Ending");
        }
    }
}
public class TouchActions { /** * Allows the execution of the gesture ' up ' on the screen . It is typically the last of a sequence * of touch gestures . * @ param x The x coordinate relative to the viewport * @ param y The y coordinate relative to the viewport * @ return self */ public TouchActions up ( int x , int y ) { } }
if ( touchScreen != null ) { action . addAction ( new UpAction ( touchScreen , x , y ) ) ; } return this ;
public class Pattern { /** * Appends a new group pattern to the existing one . The new pattern enforces non - strict * temporal contiguity . This means that a matching event of this pattern and the * preceding matching event might be interleaved with other events which are ignored . * @ param group the pattern to append * @ return A new pattern which is appended to this one */ public GroupPattern < T , F > followedBy ( Pattern < T , F > group ) { } }
return new GroupPattern < > ( this , group , ConsumingStrategy . SKIP_TILL_NEXT , afterMatchSkipStrategy ) ;
public class EvernoteHtmlHelper { /** * Makes a GET request to download the note content as HTML . Call { @ link # parseBody ( Response ) } * to get the note content from the returned response . * @ param noteGuid The desired note . * @ return The server response . You can check the status code if the request was successful . */ public Response downloadNote ( @ NonNull String noteGuid ) throws IOException { } }
String url = mBaseUrl + '/' + noteGuid ; return fetchEvernoteUrl ( url ) ;
public class TextDataLoader {
    /**
     * To be called by the {@link #initialLoad()} method.
     * It will take in the text and add a new document vector to the data set.
     * Once all text documents have been loaded, this method should never be
     * called again.<br>
     * <br>
     * This method is thread safe: per-thread scratch space is kept in ThreadLocals,
     * and only the final append to {@code vectors} is synchronized.
     *
     * @param text the text of the document to add
     * @return the index of the created document for the given text; starts from
     *         zero and counts up
     */
    protected int addOriginalDocument(String text) {
        if (noMoreAdding)
            throw new RuntimeException("Initial data set has been finalized");
        // Fetch this thread's scratch buffers; lazily create them on first use.
        StringBuilder localWorkSpace = workSpace.get();
        List<String> localStorageSpace = storageSpace.get();
        Map<String, Integer> localWordCounts = wordCounts.get();
        if (localWorkSpace == null) {
            localWorkSpace = new StringBuilder();
            localStorageSpace = new ArrayList<String>();
            localWordCounts = new LinkedHashMap<String, Integer>();
            workSpace.set(localWorkSpace);
            storageSpace.set(localStorageSpace);
            wordCounts.set(localWordCounts);
        }
        // Reset the scratch buffers before tokenizing this document.
        localWorkSpace.setLength(0);
        localStorageSpace.clear();
        localWordCounts.clear();
        tokenizer.tokenize(text, localWorkSpace, localStorageSpace);
        // Count token occurrences for this document.
        for (String word : localStorageSpace) {
            Integer count = localWordCounts.get(word);
            if (count == null)
                localWordCounts.put(word, 1);
            else
                localWordCounts.put(word, count + 1);
        }
        SparseVector vec = new SparseVector(currentLength.get() + 1, localWordCounts.size());
        // +1 to avoid issues when its length is zero, will be corrected in finalization step anyway
        for (Iterator<Map.Entry<String, Integer>> iter = localWordCounts.entrySet().iterator(); iter.hasNext();) {
            Map.Entry<String, Integer> entry = iter.next();
            String word = entry.getKey();
            int ms_to_sleep = 1;
            // Retry in a loop with exponential back off (capped at 100 ms) until the
            // word is registered; addWord presumably fails transiently under contention
            // -- TODO confirm against its contract.
            while (!addWord(word, vec, entry.getValue()))
            {
                try {
                    Thread.sleep(ms_to_sleep);
                    ms_to_sleep = Math.min(100, ms_to_sleep * 2);
                } catch (InterruptedException ex) {
                    Logger.getLogger(TextDataLoader.class.getName()).log(Level.SEVERE, null, ex);
                }
            }
        }
        localWordCounts.clear();
        // Publish the vector and hand out the next document index atomically.
        synchronized (vectors) {
            vectors.add(vec);
            return documents++;
        }
    }
}
public class StandardBullhornData { /** * { @ inheritDoc } */ @ Override public FileApiResponse deleteFile ( Class < ? extends FileEntity > type , Integer entityId , Integer fileId ) { } }
return this . handleDeleteFile ( type , entityId , fileId ) ;
public class JCudaDriver { /** * Computes the elapsed time between two events . * < pre > * CUresult cuEventElapsedTime ( * float * pMilliseconds , * CUevent hStart , * CUevent hEnd ) * < / pre > * < div > * < p > Computes the elapsed time between two * events . Computes the elapsed time between two events ( in milliseconds * with a resolution * of around 0.5 microseconds ) . * < p > If either event was last recorded in a * non - NULL stream , the resulting time may be greater than expected ( even * if both used * the same stream handle ) . This happens * because the cuEventRecord ( ) operation takes place asynchronously and * there is no guarantee that the measured latency is actually just * between the two * events . Any number of other different * stream operations could execute in between the two measured events , * thus altering the * timing in a significant way . * < p > If cuEventRecord ( ) has not been called * on either event then CUDA _ ERROR _ INVALID _ HANDLE is returned . If * cuEventRecord ( ) has been called on both events but one or both of them * has not yet been completed ( that is , cuEventQuery ( ) would return * CUDA _ ERROR _ NOT _ READY on at least one of the events ) , CUDA _ ERROR _ NOT _ READY * is returned . If either event was created with the CU _ EVENT _ DISABLE _ TIMING * flag , then this function will return CUDA _ ERROR _ INVALID _ HANDLE . * < div > * < span > Note : < / span > * < p > Note that this * function may also return error codes from previous , asynchronous * launches . 
* < / div > * < / div > * @ param pMilliseconds Time between hStart and hEnd in ms * @ param hStart Starting event * @ param hEnd Ending event * @ return CUDA _ SUCCESS , CUDA _ ERROR _ DEINITIALIZED , CUDA _ ERROR _ NOT _ INITIALIZED , * CUDA _ ERROR _ INVALID _ CONTEXT , CUDA _ ERROR _ INVALID _ HANDLE , * CUDA _ ERROR _ NOT _ READY * @ see JCudaDriver # cuEventCreate * @ see JCudaDriver # cuEventRecord * @ see JCudaDriver # cuEventQuery * @ see JCudaDriver # cuEventSynchronize * @ see JCudaDriver # cuEventDestroy */ public static int cuEventElapsedTime ( float pMilliseconds [ ] , CUevent hStart , CUevent hEnd ) { } }
return checkResult ( cuEventElapsedTimeNative ( pMilliseconds , hStart , hEnd ) ) ;
public class IPMolecularLearningDescriptor { /** * It calculates the first ionization energy of a molecule . * @ param atomContainer AtomContainer * @ return The first ionization energy */ @ Override public DescriptorValue calculate ( IAtomContainer atomContainer ) { } }
IAtomContainer local ; if ( addlp ) { try { local = ( IAtomContainer ) atomContainer . clone ( ) ; LonePairElectronChecker lpcheck = new LonePairElectronChecker ( ) ; lpcheck . saturate ( local ) ; } catch ( CloneNotSupportedException e ) { return new DescriptorValue ( getSpecification ( ) , getParameterNames ( ) , getParameters ( ) , new DoubleResult ( Double . NaN ) , getDescriptorNames ( ) , e ) ; } catch ( CDKException e ) { return new DescriptorValue ( getSpecification ( ) , getParameterNames ( ) , getParameters ( ) , new DoubleResult ( Double . NaN ) , getDescriptorNames ( ) , e ) ; } } else local = atomContainer ; DoubleResult value ; try { value = new DoubleResult ( ( ( DoubleArrayResult ) calculatePlus ( local ) . getValue ( ) ) . get ( 0 ) ) ; } catch ( CDKException e ) { return new DescriptorValue ( getSpecification ( ) , getParameterNames ( ) , getParameters ( ) , new DoubleResult ( Double . NaN ) , getDescriptorNames ( ) , e ) ; } return new DescriptorValue ( getSpecification ( ) , getParameterNames ( ) , getParameters ( ) , value , getDescriptorNames ( ) ) ;
public class SslContextBuilder { /** * The cipher suites to enable , in the order of preference . { @ code cipherFilter } will be * applied to the ciphers before use . If { @ code ciphers } is { @ code null } , then the default * cipher suites will be used . */ public SslContextBuilder ciphers ( Iterable < String > ciphers , CipherSuiteFilter cipherFilter ) { } }
checkNotNull ( cipherFilter , "cipherFilter" ) ; this . ciphers = ciphers ; this . cipherFilter = cipherFilter ; return this ;
public class RemoteBrowserIterator {
    /**
     * Removes the current element by delegating to the underlying browser iterator,
     * with entry/exit tracing.
     *
     * @see java.util.Iterator#remove()
     */
    public void remove() {
        if (tc.isEntryEnabled())
            SibTr.entry(tc, "remove");
        browserIterator.remove();
        if (tc.isEntryEnabled())
            SibTr.exit(tc, "remove");
    }
}
public class LogRecordTableModel { /** * { time , state , taskID , taskType } */ @ Override public Object getValueAt ( int rowIndex , int columnIndex ) { } }
LogRecord log = logRecords . get ( rowIndex ) ; if ( log != null ) { Object [ ] params = log . getParameters ( ) ; switch ( columnIndex ) { case 0 : return Integer . parseInt ( ( String ) log . getParameters ( ) [ 0 ] ) ; case 1 : if ( params != null ) { return log . getParameters ( ) [ 1 ] ; } break ; case 2 : if ( params != null ) { Automaton aut = ( Automaton ) params [ 5 ] ; return aut . getAgent ( ) . getId ( ) ; } return log . getLoggerName ( ) ; case 3 : if ( params != null ) { return params [ 2 ] ; } break ; case 4 : if ( params != null ) { return params [ 3 ] ; } break ; case 5 : if ( params != null ) { return params [ 4 ] ; } break ; case 6 : if ( params != null ) { Automaton aut = ( Automaton ) params [ 5 ] ; return aut . getFinishCondition ( ) ; } break ; case 7 : return log . getMessage ( ) ; } } return "" ;
public class ArrayDatabase { @ Override public synchronized void put ( Integer key , T value ) { } }
int intKey = key ; while ( this . data . size ( ) <= intKey ) { this . data . add ( null ) ; } if ( this . data . get ( intKey ) != null ) { throw new DatabaseException ( "Database already has a value for key [" + key + "]" ) ; } this . data . set ( intKey , value ) ;
public class CreateFunctionFromMethod { /** * Find out if the function is defined . It might be defined in the * FunctionForVoltDB table . It also might be in the VoltXML . * @ param functionName * @ return */ private boolean isDefinedFunctionName ( String functionName ) { } }
return FunctionForVoltDB . isFunctionNameDefined ( functionName ) || FunctionSQL . isFunction ( functionName ) || FunctionCustom . getFunctionId ( functionName ) != ID_NOT_DEFINED || ( null != m_schema . findChild ( "ud_function" , functionName ) ) ;
public class LineSegmentPath { /** * Add a node to the path with the specified destination point and facing direction . * @ param x the x - position . * @ param y the y - position . * @ param dir the facing direction . */ public void addNode ( int x , int y , int dir ) { } }
_nodes . add ( new PathNode ( x , y , dir ) ) ;
public class Firmata {
    /**
     * Adds a messageListener to this Firmata object which fires whenever a matching
     * message is received over the SerialPort for the given DigitalChannel.
     *
     * @param channel DigitalChannel to listen on; its identifier is used as the registration key
     * @param messageListener listener to handle a received Message event over the SerialPort;
     *                        registered under its declared message type
     */
    public void addMessageListener(DigitalChannel channel, MessageListener<? extends Message> messageListener) {
        addListener(channel.getIdentifier(), messageListener.getMessageType(), messageListener);
    }
}
public class ArrayELResolver { /** * If the base object is a Java language array , attempts to set the value at the given index * with the given value . The index is specified by the property argument , and coerced into an * integer . If the coercion could not be performed , an IllegalArgumentException is thrown . If * the index is out of bounds , a PropertyNotFoundException is thrown . If the base is a Java * language array , the propertyResolved property of the ELContext object must be set to true by * this resolver , before returning . If this property is not true after this method is called , * the caller can safely assume no value was set . If this resolver was constructed in read - only * mode , this method will always throw PropertyNotWritableException . * @ param context * The context of this evaluation . * @ param base * The array to analyze . Only bases that are a Java language array are handled by * this resolver . * @ param property * The index of the element in the array to return the acceptable type for . Will be * coerced into an integer , but otherwise ignored by this resolver . * @ param value * The value to be set at the given index . * @ throws PropertyNotFoundException * if the given index is out of bounds for this array . * @ throws ClassCastException * if the class of the specified element prevents it from being added to this array . * @ throws NullPointerException * if context is null * @ throws IllegalArgumentException * if the property could not be coerced into an integer , or if some aspect of the * specified element prevents it from being added to this array . * @ throws PropertyNotWritableException * if this resolver was constructed in read - only mode . * @ throws ELException * if an exception was thrown while performing the property or variable resolution . * The thrown exception must be included as the cause property of this exception , if * available . 
*/ @ Override public void setValue ( ELContext context , Object base , Object property , Object value ) { } }
if ( context == null ) { throw new NullPointerException ( "context is null" ) ; } if ( isResolvable ( base ) ) { if ( readOnly ) { throw new PropertyNotWritableException ( "resolver is read-only" ) ; } Array . set ( base , toIndex ( base , property ) , value ) ; context . setPropertyResolved ( true ) ; }
public class UsbDeviceUtilities { /** * Return an integer product ID from a URI specifying a USB device . * @ param uri the USB device URI * @ throws DataSourceResourceException If the URI doesn ' t match the * format usb : / / vendor _ id / device _ id */ public static int productFromUri ( URI uri ) throws DataSourceResourceException { } }
try { return Integer . parseInt ( uri . getPath ( ) . substring ( 1 ) , 16 ) ; } catch ( NumberFormatException e ) { throw new DataSourceResourceException ( "USB device must be of the format " + DEFAULT_USB_DEVICE_URI + " -- the given " + uri + " has a bad product ID" ) ; } catch ( StringIndexOutOfBoundsException e ) { throw new DataSourceResourceException ( "USB device must be of the format " + DEFAULT_USB_DEVICE_URI + " -- the given " + uri + " has a bad product ID" ) ; }
public class PlanAssembler {
    /**
     * Check if we can push the limit node down.
     * Return a mid-plan send node, if one exists and can host a distributed limit node.
     * There is guaranteed to be at most a single receive/send pair.
     * Abort the search if a node that a "limit" can't be pushed past is found before
     * its receive node.
     * Can only push past:
     *   * coordinatingAggregator: a distributed aggregator a copy of which has already
     *     been pushed down. Distributing a LIMIT to just above that aggregator is correct.
     *     (I've got some doubts that this is correct??? --paul)
     *   * order by: if the plan requires a sort, getNextSelectPlan() will have already
     *     added an ORDER BY. A distributed LIMIT will be added above a copy of that
     *     ORDER BY node.
     *   * projection: these have no effect on the application of limits.
     *
     * @param root root of the plan fragment to search
     * @return If we can push the limit down, the send plan node is returned.
     *         Otherwise null -- when the plan is single-partition or when its
     *         "coordinator" part contains a push-blocking node type.
     */
    protected AbstractPlanNode checkLimitPushDownViability(AbstractPlanNode root) {
        AbstractPlanNode receiveNode = root;
        List<ParsedColInfo> orderBys = m_parsedSelect.orderByColumns();
        boolean orderByCoversAllGroupBy = m_parsedSelect.groupByIsAnOrderByPermutation();
        // Walk down from the root looking for the ReceivePlanNode, aborting on any
        // node a LIMIT cannot be pushed past.
        while (!(receiveNode instanceof ReceivePlanNode)) {
            // Limitation: can only push past some nodes (see above comment)
            // Delete the aggregate node case to handle ENG-6485,
            // or say we don't push down meeting aggregate node
            // TODO: We might want to optimize/push down "limit" for some cases
            if (!(receiveNode instanceof OrderByPlanNode) &&
                !(receiveNode instanceof ProjectionPlanNode) &&
                !isValidAggregateNodeForLimitPushdown(receiveNode, orderBys, orderByCoversAllGroupBy)) {
                return null;
            }
            if (receiveNode instanceof OrderByPlanNode) {
                // if grouping by the partition key,
                // limit can still push down if ordered by aggregate values.
                if (!m_parsedSelect.hasPartitionColumnInGroupby() &&
                    isOrderByAggregationValue(m_parsedSelect.orderByColumns())) {
                    return null;
                }
            }
            // Traverse...
            if (receiveNode.getChildCount() == 0) {
                return null;
            }
            // nothing that allows pushing past has multiple inputs
            assert (receiveNode.getChildCount() == 1);
            receiveNode = receiveNode.getChild(0);
        }
        // The receive node's single child is the send node that can host the limit.
        return receiveNode.getChild(0);
    }
}
public class ObjectUtils { /** * Makes a shallow copy of the source object into the target one . * This method differs from { @ link ReflectionUtils # shallowCopyFieldState ( Object , Object ) } this doesn ' t require * source and target objects to share the same class hierarchy . * @ param source * the source object . * @ param target * the target object . */ public static void shallowCopy ( Object source , Object target ) { } }
ObjectUtils . doShallowCopy ( source , target , Boolean . TRUE ) ;
public class Comment { /** * Return serialField tags in this comment . */ SerialFieldTag [ ] serialFieldTags ( ) { } }
ListBuffer < SerialFieldTag > found = new ListBuffer < > ( ) ; for ( Tag next : tagList ) { if ( next instanceof SerialFieldTag ) { found . append ( ( SerialFieldTag ) next ) ; } } return found . toArray ( new SerialFieldTag [ found . length ( ) ] ) ;
public class DataMediaServiceImpl { /** * 添加 */ @ Override public void create ( DataMedia dataMedia ) { } }
Assert . assertNotNull ( dataMedia ) ; try { DataMediaDO dataMediaDo = modelToDo ( dataMedia ) ; dataMediaDo . setId ( 0L ) ; if ( ! dataMediaDao . checkUnique ( dataMediaDo ) ) { String exceptionCause = "exist the same name dataMedia in the database." ; logger . warn ( "WARN ## " + exceptionCause ) ; throw new RepeatConfigureException ( exceptionCause ) ; } dataMediaDao . insert ( dataMediaDo ) ; } catch ( RepeatConfigureException rce ) { throw rce ; } catch ( Exception e ) { logger . error ( "ERROR ## create dataMedia has an exception!" ) ; throw new ManagerException ( e ) ; }
public class JdbcUtils { /** * Retrieve all index names */ public static Set < String > getForeignKeys ( DriverTypeEnum . ConnectionProperties theConnectionProperties , String theTableName , String theForeignTable ) throws SQLException { } }
DataSource dataSource = Objects . requireNonNull ( theConnectionProperties . getDataSource ( ) ) ; try ( Connection connection = dataSource . getConnection ( ) ) { return theConnectionProperties . getTxTemplate ( ) . execute ( t -> { DatabaseMetaData metadata ; try { metadata = connection . getMetaData ( ) ; ResultSet indexes = metadata . getCrossReference ( connection . getCatalog ( ) , connection . getSchema ( ) , massageIdentifier ( metadata , theTableName ) , connection . getCatalog ( ) , connection . getSchema ( ) , massageIdentifier ( metadata , theForeignTable ) ) ; Set < String > columnNames = new HashSet < > ( ) ; while ( indexes . next ( ) ) { String tableName = toUpperCase ( indexes . getString ( "PKTABLE_NAME" ) , Locale . US ) ; if ( ! theTableName . equalsIgnoreCase ( tableName ) ) { continue ; } tableName = toUpperCase ( indexes . getString ( "FKTABLE_NAME" ) , Locale . US ) ; if ( ! theForeignTable . equalsIgnoreCase ( tableName ) ) { continue ; } String fkName = indexes . getString ( "FK_NAME" ) ; fkName = toUpperCase ( fkName , Locale . US ) ; columnNames . add ( fkName ) ; } return columnNames ; } catch ( SQLException e ) { throw new InternalErrorException ( e ) ; } } ) ; }
public class UniverseApi { /** * Get constellation information Get information on a constellation - - - This * route expires daily at 11:05 * @ param constellationId * constellation _ id integer ( required ) * @ param acceptLanguage * Language to use in the response ( optional , default to en - us ) * @ param datasource * The server name you would like data from ( optional , default to * tranquility ) * @ param ifNoneMatch * ETag from a previous request . A 304 will be returned if this * matches the current ETag ( optional ) * @ param language * Language to use in the response , takes precedence over * Accept - Language ( optional , default to en - us ) * @ return ConstellationResponse * @ throws ApiException * If fail to call the API , e . g . server error or cannot * deserialize the response body */ public ConstellationResponse getUniverseConstellationsConstellationId ( Integer constellationId , String acceptLanguage , String datasource , String ifNoneMatch , String language ) throws ApiException { } }
ApiResponse < ConstellationResponse > resp = getUniverseConstellationsConstellationIdWithHttpInfo ( constellationId , acceptLanguage , datasource , ifNoneMatch , language ) ; return resp . getData ( ) ;
public class QueryTracker { /** * Enforce query max runtime / execution time limits */ private void enforceTimeLimits ( ) { } }
for ( T query : queries . values ( ) ) { if ( query . isDone ( ) ) { continue ; } Duration queryMaxRunTime = getQueryMaxRunTime ( query . getSession ( ) ) ; Duration queryMaxExecutionTime = getQueryMaxExecutionTime ( query . getSession ( ) ) ; Optional < DateTime > executionStartTime = query . getExecutionStartTime ( ) ; DateTime createTime = query . getCreateTime ( ) ; if ( executionStartTime . isPresent ( ) && executionStartTime . get ( ) . plus ( queryMaxExecutionTime . toMillis ( ) ) . isBeforeNow ( ) ) { query . fail ( new PrestoException ( EXCEEDED_TIME_LIMIT , "Query exceeded the maximum execution time limit of " + queryMaxExecutionTime ) ) ; } if ( createTime . plus ( queryMaxRunTime . toMillis ( ) ) . isBeforeNow ( ) ) { query . fail ( new PrestoException ( EXCEEDED_TIME_LIMIT , "Query exceeded maximum time limit of " + queryMaxRunTime ) ) ; } }
public class sms_profile { /** * Use this API to fetch filtered set of sms _ profile resources . * filter string should be in JSON format . eg : " vm _ state : DOWN , name : [ a - z ] + " */ public static sms_profile [ ] get_filtered ( nitro_service service , String filter ) throws Exception { } }
sms_profile obj = new sms_profile ( ) ; options option = new options ( ) ; option . set_filter ( filter ) ; sms_profile [ ] response = ( sms_profile [ ] ) obj . getfiltered ( service , option ) ; return response ;
public class DeleteChecker { /** * Checks each item in the list of files stored at the endpoint ( relative * file paths ) against each sync directory to see if there is a matching * local file . If there is no matching file , that means that the file which * exists in the endpoint no longer exists in the local source directories * ( i . e . the source file has been deleted . ) Each file of this type is * removed from the endpoint . * Note that if a prefix is used , all files in the endpoint that do not * have the prefix will be removed ( as they cannot be consistent with * what the content ID will be for files pushed up with the prefix . ) */ public void run ( ) { } }
logger . info ( "Running Delete Checker" ) ; while ( filesList . hasNext ( ) && ! stopped ) { String contentId = filesList . next ( ) ; if ( null != prefix ) { // A prefix is being used if ( contentId . startsWith ( prefix ) ) { if ( ! exists ( contentId . substring ( prefix . length ( ) ) ) ) { deleteContent ( contentId ) ; } } else { // Content Id does not start with prefix deleteContent ( contentId ) ; } } else { // A prefix is not being used if ( ! exists ( contentId ) ) { deleteContent ( contentId ) ; } } } complete = true ;
public class RegularFile {
    /**
     * Transfers up to {@code count} bytes to the given channel starting at position
     * {@code pos} in this file. Returns the number of bytes transferred, possibly 0.
     * Note that unlike all other read methods in this class, this method does not
     * return -1 if {@code pos} is greater than or equal to the current size. This is
     * for consistency with {@link FileChannel#transferTo}, which this method is
     * primarily intended as an implementation of.
     */
    public long transferTo(long pos, long count, WritableByteChannel dest) throws IOException {
        long bytesToRead = bytesToRead(pos, count);
        if (bytesToRead > 0) {
            long remaining = bytesToRead;
            // First block may be read from a non-zero offset within the block.
            int blockIndex = blockIndex(pos);
            byte[] block = blocks[blockIndex];
            int off = offsetInBlock(pos);
            ByteBuffer buf = ByteBuffer.wrap(block, off, length(off, remaining));
            // WritableByteChannel.write may write fewer bytes than requested, so
            // loop until the buffer is drained.
            while (buf.hasRemaining()) {
                remaining -= dest.write(buf);
            }
            buf.clear();
            // Subsequent blocks are read from offset 0.
            while (remaining > 0) {
                int index = ++blockIndex;
                block = blocks[index];
                buf = ByteBuffer.wrap(block, 0, length(remaining));
                while (buf.hasRemaining()) {
                    remaining -= dest.write(buf);
                }
                buf.clear();
            }
        }
        return Math.max(bytesToRead, 0); // don't return -1 for this method
    }
}
public class AppDeployer {
    /**
     * Stages the application and then deploys only its cron.yaml configuration.
     *
     * @throws MojoExecutionException if the App Engine deployment fails
     */
    public void deployCron() throws MojoExecutionException {
        stager.stage();
        try {
            deployMojo.getAppEngineFactory().deployment()
                    .deployCron(configBuilder.buildDeployProjectConfigurationConfiguration(appengineDirectory));
        } catch (AppEngineException ex) {
            // Wrap so Maven reports the failure with the underlying cause attached.
            throw new MojoExecutionException("Failed to deploy", ex);
        }
    }
}
public class CmsResourceFilter { /** * Returns an extended filter in order to avoid the given flags in the filtered resources . < p > * @ param flags the resource flags to exclude * @ return a filter excluding the given resource flags */ public CmsResourceFilter addExcludeFlags ( int flags ) { } }
CmsResourceFilter extendedFilter = ( CmsResourceFilter ) clone ( ) ; extendedFilter . m_flags = flags ; extendedFilter . m_filterFlags = EXCLUDED ; extendedFilter . updateCacheId ( ) ; return extendedFilter ;
public class LowercaseTransliterator { /** * System registration hook . */ static void register ( ) { } }
Transliterator . registerFactory ( _ID , new Transliterator . Factory ( ) { @ Override public Transliterator getInstance ( String ID ) { return new LowercaseTransliterator ( ULocale . US ) ; } } ) ; Transliterator . registerSpecialInverse ( "Lower" , "Upper" , true ) ;
public class MiniJPEWriterHandler { /** * Creates a GeoJSON from a List of { @ link JTSFeature } * @ param list * @ return */ public String toJSON ( List < JTSFeature > list ) { } }
Object fcJSON = this . startFeatureCollection ( ) ; for ( JTSFeature feature : list ) { Object f = this . startFeature ( ) ; Object p = this . startPoint ( ) ; try { this . addXToPoint ( feature . getGeometry ( ) . getGeometry ( ) . getCentroid ( ) . getX ( ) , p ) ; this . addYToPoint ( feature . getGeometry ( ) . getGeometry ( ) . getCentroid ( ) . getY ( ) , p ) ; } catch ( BaseException e ) { // TODO Auto - generated catch block e . printStackTrace ( ) ; } Iterator it = feature . getAttributes ( ) . keySet ( ) . iterator ( ) ; while ( it . hasNext ( ) ) { String key = it . next ( ) . toString ( ) ; this . addElementToFeature ( feature . getAttribute ( key ) . value , key , f ) ; } p = this . endPoint ( p , null , null ) ; this . addPointToFeature ( f , p ) ; this . endFeature ( f ) ; this . addFeatureToCollection ( fcJSON , f ) ; } fcJSON = this . endFeatureCollection ( fcJSON ) ; return this . featureCollectionAsJSON ( ( org . json . JSONObject ) fcJSON ) ;
public class LinkedList {
    /**
     * Returns the size of this list as seen by the given transaction: the committed
     * available size plus any links this transaction has added but not yet committed.
     *
     * @param transaction the viewing transaction, or null for the committed size only
     * @return the list length visible to the transaction
     * @see com.ibm.ws.objectManager.Collection#size(com.ibm.ws.objectManager.Transaction)
     */
    public long size(Transaction transaction) throws ObjectManagerException {
        if (Tracing.isAnyTracingEnabled() && trace.isEntryEnabled())
            trace.entry(this, cclass, "size", new Object[] { transaction });
        long listLength; // For return;
        synchronized (this) {
            // Snapshot the committed size under its own lock.
            synchronized (availableSizeLock) {
                listLength = availableSize;
            } // synchronized (availableSizeLock).
            if (transaction != null) {
                Token nextToken = head;
                // Add links ToBeAdded within the transaction.
                while (nextToken != null) {
                    Link nextLink = (Link) nextToken.getManagedObject();
                    if (nextLink.state == Link.stateToBeAdded && nextLink.lockedBy(transaction))
                        listLength++;
                    nextToken = nextLink.next;
                } // while (nextToken != null).
            } // if (transaction != null).
        } // synchronized (this).
        if (Tracing.isAnyTracingEnabled() && trace.isEntryEnabled())
            trace.exit(this, cclass, "size", "returns listLength=" + listLength + "(long)");
        return listLength;
    }
}
public class VariantTabix {
    /**
     * Creates a sorted list of the INFO keys.
     *
     * @param attr {@link Map} of key-value info pairs
     * @return the keys of {@code attr} in natural ascending order
     */
    private List<String> decodeInfoKeys(Map<String, String> attr) {
        // The keys are already Strings, so the redundant x -> x.toString() mapping
        // from the original was dropped.
        return attr.keySet().stream().sorted().collect(Collectors.toList());
    }
}
public class WikipediaQuickCheck { /** * / * public static void mainTest ( String [ ] args ) throws IOException { * TextFilter filter = new SwebleWikipediaTextFilter ( ) ; * String plainText = filter . filter ( " hallo \ n * eins \ n * zwei " ) ; * System . out . println ( plainText ) ; */ public static void main ( String [ ] args ) throws IOException , PageNotFoundException { } }
if ( args . length != 1 ) { System . out . println ( "Usage: " + WikipediaQuickCheck . class . getName ( ) + " <url>" ) ; System . exit ( 1 ) ; } WikipediaQuickCheck check = new WikipediaQuickCheck ( ) ; // URL examples : // String urlString = " http : / / de . wikipedia . org / wiki / Angela _ Merkel " ; // String urlString = " https : / / de . wikipedia . org / wiki / Benutzer _ Diskussion : Dnaber " ; // String urlString = " https : / / secure . wikimedia . org / wikipedia / de / wiki / Gütersloh " ; String urlString = args [ 0 ] ; MarkupAwareWikipediaResult result = check . checkPage ( new URL ( urlString ) , new ErrorMarker ( "***" , "***" ) ) ; int errorCount = 0 ; for ( AppliedRuleMatch match : result . getAppliedRuleMatches ( ) ) { RuleMatchApplication matchApplication = match . getRuleMatchApplications ( ) . get ( 0 ) ; RuleMatch ruleMatch = match . getRuleMatch ( ) ; Rule rule = ruleMatch . getRule ( ) ; System . out . println ( "" ) ; String message = ruleMatch . getMessage ( ) . replace ( "<suggestion>" , "'" ) . replace ( "</suggestion>" , "'" ) ; errorCount ++ ; System . out . print ( errorCount + ". " + message ) ; if ( rule instanceof AbstractPatternRule ) { System . out . println ( " (" + ( ( AbstractPatternRule ) rule ) . getFullId ( ) + ")" ) ; } else { System . out . println ( " (" + rule . getId ( ) + ")" ) ; } System . out . println ( " ..." + matchApplication . getOriginalErrorContext ( 50 ) . replace ( "\n" , "\\n" ) + "..." ) ; }