signature
stringlengths
43
39.1k
implementation
stringlengths
0
450k
public class AWSOpsWorksClient { /** * Specifies a user ' s permissions . For more information , see < a * href = " http : / / docs . aws . amazon . com / opsworks / latest / userguide / workingsecurity . html " > Security and Permissions < / a > . * < b > Required Permissions < / b > : To use this action , an IAM user must have a Manage permissions level for the stack , * or an attached policy that explicitly grants permissions . For more information on user permissions , see < a * href = " http : / / docs . aws . amazon . com / opsworks / latest / userguide / opsworks - security - users . html " > Managing User * Permissions < / a > . * @ param setPermissionRequest * @ return Result of the SetPermission operation returned by the service . * @ throws ValidationException * Indicates that a request was not valid . * @ throws ResourceNotFoundException * Indicates that a resource was not found . * @ sample AWSOpsWorks . SetPermission * @ see < a href = " http : / / docs . aws . amazon . com / goto / WebAPI / opsworks - 2013-02-18 / SetPermission " target = " _ top " > AWS API * Documentation < / a > */ @ Override public SetPermissionResult setPermission ( SetPermissionRequest request ) { } }
request = beforeClientExecution ( request ) ; return executeSetPermission ( request ) ;
public class Reflections { /** * 根据方法名称获取方法 * @ param cls * @ param methodName * @ return */ public static Method getDeclaredMethod ( final Class cls , final String methodName ) { } }
return getMethod ( cls , methodName , true ) ;
public class JSType { /** * Determines if the specified type is exempt from standard invariant templatized typing rules . */ static boolean isIThenableSubtype ( JSType type ) { } }
if ( type . isTemplatizedType ( ) ) { TemplatizedType ttype = type . toMaybeTemplatizedType ( ) ; return ttype . getTemplateTypeMap ( ) . hasTemplateKey ( ttype . registry . getThenableValueKey ( ) ) ; } return false ;
public class Neo4JGraph { /** * Executes the given statement on the current { @ link Graph } instance . WARNING : There is no * guarantee that the results are confined within the current { @ link Neo4JReadPartition } . * @ param statement The CYPHER statement . * @ return The { @ link StatementResult } with the CYPHER statement execution results . */ public StatementResult execute ( String statement ) { } }
Objects . requireNonNull ( statement , "statement cannot be null" ) ; // use overloaded method return execute ( new Statement ( statement ) ) ;
public class StorePackageImpl {
    /**
     * Returns the {@link EClass} for the Action element, resolving it lazily
     * from the registered package on first access.
     * <!-- begin-user-doc -->
     * <!-- end-user-doc -->
     * @generated
     */
    @Override
    public EClass getAction() {
        // Lazily resolve and cache the classifier; index 114 is fixed by the
        // EMF code generator for this package.
        if (actionEClass == null) {
            actionEClass = (EClass) EPackage.Registry.INSTANCE
                    .getEPackage(StorePackage.eNS_URI).getEClassifiers().get(114);
        }
        return actionEClass;
    }
}
public class Util { /** * On Windows , error messages for IOException aren ' t very helpful . * This method generates additional user - friendly error message to the listener */ public static void displayIOException ( @ Nonnull IOException e , @ Nonnull TaskListener listener ) { } }
String msg = getWin32ErrorMessage ( e ) ; if ( msg != null ) listener . getLogger ( ) . println ( msg ) ;
public class Math { /** * Returns the minimum value of an array . */ public static float min ( float [ ] x ) { } }
float m = Float . POSITIVE_INFINITY ; for ( float n : x ) { if ( n < m ) { m = n ; } } return m ;
public class Client { /** * Deletes the specified shovel from specified virtual host . * @ param vhost virtual host from where to delete the shovel * @ param shovelname Shovel to be deleted . */ public void deleteShovel ( String vhost , String shovelname ) { } }
this . deleteIgnoring404 ( uriWithPath ( "./parameters/shovel/" + encodePathSegment ( vhost ) + "/" + encodePathSegment ( shovelname ) ) ) ;
public class SyntheticTerminalDetector { /** * Answers { @ code true } if the given terminal rule is synthetic . That is , * the tokens for this rule will not be produced by the generated Antlr lexer * but manually in a custom token source . */ public boolean isSyntheticTerminalRule ( final TerminalRule rule ) { } }
AbstractElement _alternatives = rule . getAlternatives ( ) ; if ( ( _alternatives instanceof Keyword ) ) { AbstractElement _alternatives_1 = rule . getAlternatives ( ) ; String value = ( ( Keyword ) _alternatives_1 ) . getValue ( ) ; String _name = AntlrGrammarGenUtil . < TerminalRule > getOriginalElement ( rule ) . getName ( ) ; String _plus = ( "synthetic:" + _name ) ; return Objects . equal ( _plus , value ) ; } return false ;
public class WildFilesCLA {
    /**
     * Writes this argument's first value for the given occurrence into the
     * command-line output, quoted for later recompilation.
     *
     * {@inheritDoc}
     */
    @Override
    protected void exportCommandLineData(final StringBuilder out, final int occ) {
        // Only the first value of the occurrence is exported.
        uncompileQuoter(out, getValue(occ).get(0));
    }
}
public class BBossESStarter { /** * Get Special elasticsearch server ConfigFile ClientInterface * @ param elasticsearchName elasticsearch server name which defined in bboss spring boot application configfile * @ param configFile * @ return */ public ClientInterface getConfigRestClient ( String elasticsearchName , String configFile ) { } }
return ElasticSearchHelper . getConfigRestClientUtil ( elasticsearchName , configFile ) ;
public class AbstractInstanceRegistry {
    /**
     * Updates the status of an instance. Normally used to move an instance
     * between {@link InstanceStatus#OUT_OF_SERVICE} and
     * {@link InstanceStatus#UP} to put the instance in and out of traffic.
     *
     * @param appName the application name of the instance.
     * @param id the unique identifier of the instance.
     * @param newStatus the new {@link InstanceStatus}.
     * @param lastDirtyTimestamp last timestamp when this instance information was updated.
     * @param isReplication true if this is a replication event from other nodes, false
     *            otherwise.
     * @return true if the status was successfully updated, false otherwise.
     */
    @Override
    public boolean statusUpdate(String appName, String id, InstanceStatus newStatus,
            String lastDirtyTimestamp, boolean isReplication) {
        try {
            // Read lock: status updates may run concurrently with other reads;
            // the registry map itself is not structurally modified here.
            read.lock();
            STATUS_UPDATE.increment(isReplication);
            Map<String, Lease<InstanceInfo>> gMap = registry.get(appName);
            Lease<InstanceInfo> lease = null;
            if (gMap != null) {
                lease = gMap.get(id);
            }
            if (lease == null) {
                // Unknown instance: nothing to update.
                return false;
            } else {
                // Touching the lease also counts as a renewal.
                lease.renew();
                InstanceInfo info = lease.getHolder();
                // Lease is always created with its instance info object.
                // This log statement is provided as a safeguard, in case this invariant is violated.
                if (info == null) {
                    logger.error("Found Lease without a holder for instance id {}", id);
                }
                // Only apply the update when the status actually changes.
                if ((info != null) && !(info.getStatus().equals(newStatus))) {
                    // Mark service as UP if needed
                    if (InstanceStatus.UP.equals(newStatus)) {
                        lease.serviceUp();
                    }
                    // This is NAC overriden status
                    overriddenInstanceStatusMap.put(id, newStatus);
                    // Set it for transfer of overridden status to replica on
                    // replica start up
                    info.setOverriddenStatus(newStatus);
                    long replicaDirtyTimestamp = 0;
                    info.setStatusWithoutDirty(newStatus);
                    if (lastDirtyTimestamp != null) {
                        replicaDirtyTimestamp = Long.valueOf(lastDirtyTimestamp);
                    }
                    // If the replication's dirty timestamp is more than the existing one, just update
                    // it to the replica's.
                    if (replicaDirtyTimestamp > info.getLastDirtyTimestamp()) {
                        info.setLastDirtyTimestamp(replicaDirtyTimestamp);
                    }
                    info.setActionType(ActionType.MODIFIED);
                    recentlyChangedQueue.add(new RecentlyChangedItem(lease));
                    info.setLastUpdatedTimestamp();
                    // Invalidate cached responses so readers observe the new status.
                    invalidateCache(appName, info.getVIPAddress(), info.getSecureVipAddress());
                }
                return true;
            }
        } finally {
            read.unlock();
        }
    }
}
public class RectifyCalibrated {
    /**
     * Selects the axes of the new (rectified) coordinate system, storing the
     * results in the fields v1, v2 and v3.
     *
     * @param R1 rotation matrix of the first view
     * @param R2 rotation matrix of the second view
     * @param c1 camera center of the first view
     * @param c2 camera center of the second view
     */
    private void selectAxises(SimpleMatrix R1, SimpleMatrix R2, SimpleMatrix c1, SimpleMatrix c2) {
        // ----- Compute the new x-axis: the direction of the baseline between the two camera centers.
        v1.set(c2.get(0) - c1.get(0), c2.get(1) - c1.get(1), c2.get(2) - c1.get(2));
        v1.normalize();

        // ----- Compute the new y-axis
        // cross product of old z axis and new x axis
        // According to the paper [1] this choice is arbitrary, however it is not. By selecting
        // the original axis the similarity with the first view is maximized. The other extreme
        // would be to make it perpendicular, resulting in an unusable rectification.

        // extract old z-axis from rotation matrix (sum of both views' third rows)
        Vector3D_F64 oldZ = new Vector3D_F64(
                R1.get(2, 0) + R2.get(2, 0),
                R1.get(2, 1) + R2.get(2, 1),
                R1.get(2, 2) + R2.get(2, 2));
        GeometryMath_F64.cross(oldZ, v1, v2);
        v2.normalize();

        // ----- Compute the new z-axis
        // simply the cross product of the first two
        GeometryMath_F64.cross(v1, v2, v3);
        v3.normalize();
    }
}
public class SystemSecurityContext {
    /**
     * Runs the given callable as the system user within the given tenant,
     * restoring the caller's security context afterwards.
     *
     * The callable API throws a plain Exception and not a specific one, hence
     * the SonarQube suppressions below.
     *
     * @param callable the code to execute under the system security context
     * @param tenant the tenant to run as
     * @return the callable's result
     */
    @SuppressWarnings({ "squid:S2221", "squid:S00112" })
    public <T> T runAsSystemAsTenant(final Callable<T> callable, final String tenant) {
        // Remember the caller's context so it can be restored unconditionally.
        final SecurityContext oldContext = SecurityContextHolder.getContext();
        try {
            LOG.debug("entering system code execution");
            return tenantAware.runAsTenant(tenant, () -> {
                try {
                    // Swap in the system context only inside the tenant scope.
                    setSystemContext(SecurityContextHolder.getContext());
                    return callable.call();
                } catch (final RuntimeException e) {
                    // Propagate unchecked exceptions untouched.
                    throw e;
                } catch (final Exception e) {
                    // Wrap checked exceptions; Callable declares bare Exception.
                    throw new RuntimeException(e);
                }
            });
        } finally {
            // Always restore the original context, even if the callable threw.
            SecurityContextHolder.setContext(oldContext);
            LOG.debug("leaving system code execution");
        }
    }
}
public class Solo { /** * Enters text in an EditText matching the specified index . * @ param index the index of the { @ link EditText } . { @ code 0 } if only one is available * @ param text the text to enter in the { @ link EditText } field */ public void enterText ( int index , String text ) { } }
if ( config . commandLogging ) { Log . d ( config . commandLoggingTag , "enterText(" + index + ", \"" + text + "\")" ) ; } textEnterer . setEditText ( waiter . waitForAndGetView ( index , EditText . class ) , text ) ;
public class VorbisFile {
    /**
     * Processes one packet from the current stream state, fetching a new page
     * when needed.
     *
     * @param readp if 0, never read new data from the source; only drain what
     *            is already buffered
     * @return 1 when a packet was decoded, 0 on end of data (or when readp is
     *            0 and nothing is buffered), negative on error
     */
    int process_packet(int readp) {
        Page og = new Page();

        // handle one packet. Try to fetch it from current stream state
        // extract packets from page
        while (true) {
            // process a packet if we can. If the machine isn't loaded,
            // neither is a page
            if (decode_ready) {
                Packet op = new Packet();
                int result = os.packetout(op);
                long granulepos;
                // if(result==-1) return(-1); // hole in the data. For now, swallow
                // and go. We'll need to add a real
                // error code in a bit.
                if (result > 0) {
                    // got a packet. process it
                    granulepos = op.granulepos;
                    if (vb.synthesis(op) == 0) { // lazy check for lazy
                        // header handling. The
                        // header packets aren't
                        // audio, so if/when we
                        // submit them,
                        // vorbis_synthesis will
                        // reject them

                        // suck in the synthesis data and track bitrate
                        {
                            int oldsamples = vd.synthesis_pcmout(null, null);
                            vd.synthesis_blockin(vb);
                            samptrack += vd.synthesis_pcmout(null, null) - oldsamples;
                            bittrack += op.bytes * 8;
                        }

                        // update the pcm offset.
                        if (granulepos != -1 && op.e_o_s == 0) {
                            int link = (seekable ? current_link : 0);
                            int samples;
                            // this packet has a pcm_offset on it (the last packet
                            // completed on a page carries the offset) After processing
                            // (above), we know the pcm position of the *last* sample
                            // ready to be returned. Find the offset of the *first*
                            // As an aside, this trick is inaccurate if we begin
                            // reading anew right at the last page; the end-of-stream
                            // granulepos declares the last frame in the stream, and the
                            // last packet of the last page may be a partial frame.
                            // So, we need a previous granulepos from an in-sequence page
                            // to have a reference point. Thus the !op.e_o_s clause above
                            samples = vd.synthesis_pcmout(null, null);
                            granulepos -= samples;
                            // Offset by the lengths of all preceding logical bitstreams.
                            for (int i = 0; i < link; i++) {
                                granulepos += pcmlengths[i];
                            }
                            pcm_offset = granulepos;
                        }
                        return (1);
                    }
                }
            }

            if (readp == 0) {
                return (0);
            }
            if (get_next_page(og, -1) < 0) {
                return (0); // eof. leave unitialized
            }

            // bitrate tracking; add the header's bytes here, the body bytes
            // are done by packet above
            bittrack += og.header_len * 8;

            // has our decoding just traversed a bitstream boundary?
            if (decode_ready) {
                if (current_serialno != og.serialno()) {
                    decode_clear();
                }
            }

            // Do we need to load a new machine before submitting the page?
            // This is different in the seekable and non-seekable cases.
            // In the seekable case, we already have all the header
            // information loaded and cached; we just initialize the machine
            // with it and continue on our merry way.
            // In the non-seekable (streaming) case, we'll only be at a
            // boundary if we just left the previous logical bitstream and
            // we're now nominally at the header of the next bitstream
            if (!decode_ready) {
                int i;
                if (seekable) {
                    current_serialno = og.serialno();
                    // match the serialno to bitstream section. We use this rather than
                    // offset positions to avoid problems near logical bitstream
                    // boundaries
                    for (i = 0; i < links; i++) {
                        if (serialnos[i] == current_serialno) {
                            break;
                        }
                    }
                    if (i == links) {
                        return (-1); // sign of a bogus stream. error out,
                    }
                    // leave machine uninitialized
                    current_link = i;
                    os.init(current_serialno);
                    os.reset();
                } else {
                    // we're streaming
                    // fetch the three header packets, build the info struct
                    int foo[] = new int[1];
                    int ret = fetch_headers(vi[0], vc[0], foo, og);
                    current_serialno = foo[0];
                    if (ret != 0) {
                        return ret;
                    }
                    current_link++;
                    i = 0;
                }
                make_decode_ready();
            }
            // Submit the freshly read page to the stream state and loop to decode it.
            os.pagein(og);
        }
    }
}
public class FieldHeading { /** * Sets the property for this field set . This includes * < ul > * < li > { @ link # level } < / li > * < / ul > * @ param _ name name / key of the property * @ param _ value value of the property * @ throws CacheReloadException from called super property method */ @ Override protected void setProperty ( final String _name , final String _value ) throws CacheReloadException { } }
if ( "Level" . equals ( _name ) ) { this . level = Integer . parseInt ( _value ) ; } else { super . setProperty ( _name , _value ) ; }
public class StreamConfiguration { /** * Set the bits per sample . Because this is not a value that may be * guessed and corrected , the value will be set to that given even if it is * not valid . * @ param bitsPerSample number of bits per sample * @ return true if value given is within the valid range , false otherwise . */ public boolean setBitsPerSample ( int bitsPerSample ) { } }
boolean result = ( ( bitsPerSample <= MAX_BITS_PER_SAMPLE ) && ( bitsPerSample >= MIN_BITS_PER_SAMPLE ) ) ; this . bitsPerSample = bitsPerSample ; return result ;
public class DigestAuthServerBuilder { /** * A server - specified data string which should be uniquely generated each time * a 401 response is made . It is recommended that this string be base64 or * hexadecimal data . Specifically , since the string is passed in the header * lines as a quoted string , the double - quote character is not allowed . < br > * The contents of the nonce are implementation dependent . The quality of the * implementation depends on a good choice . A nonce might , for example , be * constructed as the base 64 encoding of < br > * time - stamp H ( time - stamp " : " ETag " : " private - key ) < br > * where time - stamp is a server - generated time or other non - repeating value , * ETag is the value of the HTTP ETag header associated with the requested * entity , and private - key is data known only to the server . With a nonce of * this form a server would recalculate the hash portion after receiving the * client authentication header and reject the request if it did not match the * nonce from that header or if the time - stamp value is not recent enough . In * this way the server can limit the time of the nonce ’ s validity . The * inclusion of the ETag prevents a replay request for an updated version of * the resource . ( Note : including the IP address of the client in the nonce * would appear to offer the server the ability to limit the reuse of the * nonce to the same client that originally got it . However , that would break * proxy farms , where requests from a single user often go through different * proxies in the farm . Also , IP address spoofing is not that hard . ) < br > * An implementation might choose not to accept a previously used nonce or a * previously used digest , in order to protect against a replay attack . Or , an * implementation might choose to use one - time nonces or digests for POST or * PUT requests and a time - stamp for GET requests . For more details on the * issues involved see section 4 . 
of this document . < br > * The nonce is opaque to the client . * @ param sNonce * The nonce value to be set . May not be < code > null < / code > . * @ return this */ @ Nonnull public DigestAuthServerBuilder setNonce ( @ Nonnull final String sNonce ) { } }
if ( ! HttpStringHelper . isQuotedTextContent ( sNonce ) ) throw new IllegalArgumentException ( "nonce is invalid: " + sNonce ) ; m_sNonce = sNonce ; return this ;
public class AccountsInner {
    /**
     * Gets the first page of Azure Storage accounts, if any, linked to the
     * specified Data Lake Analytics account. The response includes a link to
     * the next page, if any.
     *
     * @param resourceGroupName the Azure resource group containing the Data Lake Analytics account
     * @param accountName the Data Lake Analytics account whose Storage accounts to list
     * @param filter the OData filter; optional
     * @param top the number of items to return; optional
     * @param skip the number of items to skip before returning elements; optional
     * @param expand OData expansion of related resources inline, e.g.
     *            Categories/$expand=Products; optional
     * @param select OData select statement limiting properties per entry, e.g.
     *            Categories?$select=CategoryName,Description; optional
     * @param orderby OrderBy clause: comma-separated expressions with optional
     *            "asc" (default) or "desc", e.g. Categories?$orderby=CategoryName desc; optional
     * @param count whether to include a count of matching resources in the
     *            response, e.g. Categories?$count=true; optional
     * @param search free-text search expression for inclusion in the feed,
     *            e.g. Categories?$search=blue OR green; optional
     * @param format the desired return format, e.g. Orders?$format=json; optional
     * @throws IllegalArgumentException thrown if parameters fail the validation
     * @return the observable to the PagedList&lt;StorageAccountInfoInner&gt; object
     */
    public Observable<Page<StorageAccountInfoInner>> listStorageAccountsAsync(
            final String resourceGroupName, final String accountName, final String filter,
            final Integer top, final Integer skip, final String expand, final String select,
            final String orderby, final Boolean count, final String search, final String format) {
        // Delegate to the ServiceResponse variant and unwrap to the page body.
        return listStorageAccountsWithServiceResponseAsync(resourceGroupName, accountName,
                filter, top, skip, expand, select, orderby, count, search, format)
                .map(new Func1<ServiceResponse<Page<StorageAccountInfoInner>>, Page<StorageAccountInfoInner>>() {
                    @Override
                    public Page<StorageAccountInfoInner> call(ServiceResponse<Page<StorageAccountInfoInner>> response) {
                        return response.body();
                    }
                });
    }
}
public class FormTool { /** * Constructs a checkbox input field with the specified name and * default value . */ public String checkbox ( String name , boolean defaultValue ) { } }
String value = getParameter ( name ) ; return fixedCheckbox ( name , ( value == null ) ? defaultValue : ! value . equals ( "" ) ) ;
public class DateTime { /** * Return the proper Calendar time unit as an integer given the string * @ param units The unit to parse * @ return An integer matching a Calendar . & lt ; UNIT & gt ; enum * @ throws IllegalArgumentException if the unit is null , empty or doesn ' t * match one of the configured units . * @ since 2.3 */ public static int unitsToCalendarType ( final String units ) { } }
if ( units == null || units . isEmpty ( ) ) { throw new IllegalArgumentException ( "Units cannot be null or empty" ) ; } final String lc = units . toLowerCase ( ) ; if ( lc . equals ( "ms" ) ) { return Calendar . MILLISECOND ; } else if ( lc . equals ( "s" ) ) { return Calendar . SECOND ; } else if ( lc . equals ( "m" ) ) { return Calendar . MINUTE ; } else if ( lc . equals ( "h" ) ) { return Calendar . HOUR_OF_DAY ; } else if ( lc . equals ( "d" ) ) { return Calendar . DAY_OF_MONTH ; } else if ( lc . equals ( "w" ) ) { return Calendar . DAY_OF_WEEK ; } else if ( lc . equals ( "n" ) ) { return Calendar . MONTH ; } else if ( lc . equals ( "y" ) ) { return Calendar . YEAR ; } throw new IllegalArgumentException ( "Unrecognized unit type: " + units ) ;
public class ModuleDeps { /** * Calls { @ link # subtract ( String , ModuleDepInfo ) } for each of the map * entries in < code > toSub < / code > * @ param toSub * The map to subtract from this map * @ return True if this map was modified */ public boolean subtractAll ( ModuleDeps toSub ) { } }
boolean modified = false ; for ( Map . Entry < String , ModuleDepInfo > entry : toSub . entrySet ( ) ) { modified |= subtract ( entry . getKey ( ) , entry . getValue ( ) ) ; } return modified ;
public class HttpRequest {
    /**
     * Sets the request body content.<br>
     * Two kinds of body are supported:
     * <pre>
     * 1. Standard form parameters, e.g. a=1&amp;b=2
     * 2. REST mode: pass a JSON or XML string; the matching Content-Type is bound automatically
     * </pre>
     *
     * @param body the request body
     * @param contentType the body content type; {@code null} means auto-detect
     * @return this
     */
    public HttpRequest body(String body, String contentType) {
        body(StrUtil.bytes(body, this.charset));
        this.form = null; // once a raw body is used, form parameters are disabled
        // NOTE(review): this uses the char count of the body, not the encoded
        // byte count — for multi-byte charsets these can differ; confirm intended.
        contentLength((null != body ? body.length() : 0));
        if (null != contentType) {
            // Content-Type explicitly provided by the caller
            this.contentType(contentType);
        } else {
            // Not set by the caller: infer the Content-Type from the body content
            contentType = HttpUtil.getContentTypeByRequestBody(body);
            if (null != contentType && ContentType.isDefault(this.header(Header.CONTENT_TYPE))) {
                if (null != this.charset) {
                    // Append charset information
                    contentType = ContentType.build(contentType, this.charset);
                }
                this.contentType(contentType);
            }
        }
        // Decide whether this is a REST-style request (JSON/XML payload)
        if (StrUtil.containsAnyIgnoreCase(contentType, "json", "xml")) {
            this.isRest = true;
        }
        return this;
    }
}
public class DevAuth {
    /**
     * Gets an access_token from the OpenAPI OAuth endpoint using the
     * client-credentials grant.
     *
     * @param apiKey API key from console
     * @param secretKey Secret Key from console
     * @param config network config settings
     * @return JSONObject of the response from the OAuth server, or a general
     *         error object when the server keeps failing or the URL is invalid
     */
    public static JSONObject oauth(String apiKey, String secretKey, AipClientConfiguration config) {
        try {
            AipRequest request = new AipRequest();
            request.setUri(new URI(AipClientConst.OAUTH_URL));
            // client-credentials grant parameters
            request.addBody("grant_type", "client_credentials");
            request.addBody("client_id", apiKey);
            request.addBody("client_secret", secretKey);
            request.setConfig(config);
            // Retry on HTTP 500, up to 3 attempts total; statusCode starts at
            // 500 so the loop always runs at least once.
            int statusCode = 500;
            AipResponse response = null;
            // add retry
            int cnt = 0;
            while (statusCode == 500 && cnt < 3) {
                response = AipHttpClient.post(request);
                statusCode = response.getStatus();
                cnt++;
            }
            String res = response.getBodyStr();
            if (res != null && !res.equals("")) {
                return new JSONObject(res);
            } else {
                // Empty body: surface the last HTTP status as a general error.
                return Util.getGeneralError(statusCode, "Server response code: " + statusCode);
            }
        } catch (URISyntaxException e) {
            e.printStackTrace();
        }
        return Util.getGeneralError(-1, "unknown error");
    }
}
public class WebFacesConfigDescriptorImpl { /** * If not already created , a new < code > lifecycle < / code > element will be created and returned . * Otherwise , the first existing < code > lifecycle < / code > element will be returned . * @ return the instance defined for the element < code > lifecycle < / code > */ public FacesConfigLifecycleType < WebFacesConfigDescriptor > getOrCreateLifecycle ( ) { } }
List < Node > nodeList = model . get ( "lifecycle" ) ; if ( nodeList != null && nodeList . size ( ) > 0 ) { return new FacesConfigLifecycleTypeImpl < WebFacesConfigDescriptor > ( this , "lifecycle" , model , nodeList . get ( 0 ) ) ; } return createLifecycle ( ) ;
public class FluoConfiguration {
    /**
     * Returns the value of the property {@value #CONNECTION_RETRY_TIMEOUT_MS_PROP} if it is set, else
     * falls back to the deprecated client key (with the default
     * {@value #CONNECTION_RETRY_TIMEOUT_MS_DEFAULT}). The integer returned
     * represents milliseconds.
     *
     * NOTE(review): the original javadoc claims the value "is always positive",
     * but the precondition below accepts any value >= -1 — confirm which is intended.
     *
     * @since 1.2.0
     */
    public int getConnectionRetryTimeout() {
        int retval;
        if (containsKey(CONNECTION_RETRY_TIMEOUT_MS_PROP)) {
            // New property key takes precedence when present.
            retval = getInt(CONNECTION_RETRY_TIMEOUT_MS_PROP, CONNECTION_RETRY_TIMEOUT_MS_DEFAULT);
        } else {
            // Fall back to the legacy client property key.
            retval = getInt(CLIENT_RETRY_TIMEOUT_MS_PROP, CONNECTION_RETRY_TIMEOUT_MS_DEFAULT);
        }
        Preconditions.checkArgument(retval >= -1, CONNECTION_RETRY_TIMEOUT_MS_PROP + " must be >= -1");
        return retval;
    }
}
public class SoapClient {
    /**
     * Sets a method parameter on the current SOAP method element.
     *
     * @param name parameter name
     * @param value parameter value; may be a String, a Map, or a {@link SOAPElement}
     * @param useMethodPrefix whether to use the method's namespace prefix for the parameter
     * @return this
     */
    public SoapClient setParam(String name, Object value, boolean useMethodPrefix) {
        // Delegate to the element-level overload; prefix is only passed when requested.
        setParam(this.methodEle, name, value, useMethodPrefix ? this.methodEle.getPrefix() : null);
        return this;
    }
}
public class DefaultAlertService {
    /**
     * Evaluates all triggers associated with the notification and updates the
     * job history, sending fire/clear notifications as appropriate.
     *
     * @param alert the alert being evaluated
     * @param history job history record to append evaluation messages to
     * @param metrics the metrics each trigger is evaluated against
     * @param triggerFiredTimesAndMetricsByTrigger map of trigger id to the
     *            metrics (and fired times) for which that trigger fired
     * @param notification the notification whose triggers are processed
     * @param alertEnqueueTimestamp time the alert was enqueued for evaluation
     */
    private void _processNotification(Alert alert, History history, List<Metric> metrics,
            Map<BigInteger, Map<Metric, Long>> triggerFiredTimesAndMetricsByTrigger,
            Notification notification, Long alertEnqueueTimestamp) {
        // refocus notifier does not need cool down logic, and every evaluation needs to send notification
        boolean isRefocusNotifier = SupportedNotifier.REFOCUS.getName().equals(notification.getNotifierName());

        for (Trigger trigger : notification.getTriggers()) {
            Map<Metric, Long> triggerFiredTimesForMetrics =
                    triggerFiredTimesAndMetricsByTrigger.get(trigger.getId());
            for (Metric m : metrics) {
                // Trigger fired for this metric?
                if (triggerFiredTimesForMetrics != null && triggerFiredTimesForMetrics.containsKey(m)) {
                    String logMessage = MessageFormat.format(
                            "The trigger {0} was evaluated against metric {1} and it is fired.",
                            trigger.getName(), m.getIdentifier());
                    history.appendMessageNUpdateHistory(logMessage, null, 0);

                    // Refocus: always notify, skip cooldown bookkeeping.
                    if (isRefocusNotifier) {
                        sendNotification(trigger, m, history, notification, alert,
                                triggerFiredTimesForMetrics.get(m), alertEnqueueTimestamp);
                        continue;
                    }

                    if (!notification.onCooldown(trigger, m)) {
                        // Not on cooldown: mark active and notify.
                        _updateNotificationSetActiveStatus(trigger, m, history, notification);
                        sendNotification(trigger, m, history, notification, alert,
                                triggerFiredTimesForMetrics.get(m), alertEnqueueTimestamp);
                    } else {
                        // On cooldown: only record the suppression in history.
                        logMessage = MessageFormat.format(
                                "The notification {0} is on cooldown until {1}.",
                                notification.getName(),
                                getDateMMDDYYYY(notification.getCooldownExpirationByTriggerAndMetric(trigger, m)));
                        history.appendMessageNUpdateHistory(logMessage, null, 0);
                    }
                } else {
                    // Trigger did not fire for this metric.
                    String logMessage = MessageFormat.format(
                            "The trigger {0} was evaluated against metric {1} and it is not fired.",
                            trigger.getName(), m.getIdentifier());
                    history.appendMessageNUpdateHistory(logMessage, null, 0);

                    // Refocus: always send the clear signal.
                    if (isRefocusNotifier) {
                        sendClearNotification(trigger, m, history, notification, alert, alertEnqueueTimestamp);
                        continue;
                    }

                    if (notification.isActiveForTriggerAndMetric(trigger, m)) {
                        // This is case when the notification was active for the given trigger, metric combination
                        // and the metric did not violate triggering condition on current evaluation. Hence we must clear it.
                        _updateNotificationClearActiveStatus(trigger, m, notification);
                        sendClearNotification(trigger, m, history, notification, alert, alertEnqueueTimestamp);
                    }
                }
            }
        }
    }
}
public class SlotPoolImpl {
    /**
     * Clear the internal state of the SlotPool: all slot bookkeeping, pending
     * requests, and registered task manager records.
     */
    private void clear() {
        // Drop all slot bookkeeping first, then outstanding requests,
        // then the set of known task managers.
        availableSlots.clear();
        allocatedSlots.clear();
        pendingRequests.clear();
        waitingForResourceManager.clear();
        registeredTaskManagers.clear();
    }
}
public class MRAsyncDiskService {
    /**
     * Move the path name on one volume to a temporary location and then
     * delete them.
     * This function returns when the move is done, but not necessarily when
     * the deletion is done. This is usually good enough because applications
     * won't see the path under the old name after the move anyway.
     *
     * @param volume the disk volume
     * @param pathName the path name relative to the volume root
     * @throws IOException if the move failed
     * @return false if the file is not found
     */
    public boolean moveAndDeleteRelativePath(String volume, String pathName) throws IOException {
        volume = normalizePath(volume);
        // Move the file right now, so that it can be deleted later.
        // The temp name is timestamp + unique counter to avoid collisions.
        String newPathName = format.format(new Date()) + "_" + uniqueId.getAndIncrement();
        newPathName = TOBEDELETED + Path.SEPARATOR_CHAR + newPathName;
        Path source = new Path(volume, pathName);
        Path target = new Path(volume, newPathName);
        try {
            if (!localFileSystem.rename(source, target)) {
                // If the source does not exists, return false.
                // This is necessary because rename can return false if the source
                // does not exists.
                if (!localFileSystem.exists(source)) {
                    return false;
                }
                // Try to recreate the parent directory just in case it gets deleted.
                if (!localFileSystem.mkdirs(new Path(volume, TOBEDELETED))) {
                    throw new IOException("Cannot create " + TOBEDELETED + " under " + volume);
                }
                // Try rename again. If it fails, return false.
                if (!localFileSystem.rename(source, target)) {
                    throw new IOException("Cannot rename " + source + " to " + target);
                }
            }
        } catch (FileNotFoundException e) {
            // Return false in case that the file is not found.
            return false;
        }
        // Schedule the actual deletion asynchronously on the volume's executor.
        DeleteTask task = new DeleteTask(volume, pathName, newPathName);
        execute(volume, task);
        return true;
    }
}
import java.io.*;
import java.lang.*;
import java.util.*;
import java.math.*;

class Main {
    /**
     * Function to calculate the last digit when the factorial of first_num
     * divides the factorial of second_num, i.e. the last digit of
     * (first_num+1) * (first_num+2) * ... * second_num.
     *
     * Examples:
     *   Main.calculateFinalDigit(2, 4) -> 2
     *   Main.calculateFinalDigit(6, 8) -> 6
     *   Main.calculateFinalDigit(1, 2) -> 2
     *
     * Args:
     *   first_num, second_num: two integers where first_num < second_num
     * Returns:
     *   An integer that is the last digit of the factorial division result.
     */
    public static int calculateFinalDigit(int first_num, int second_num) {
        // Equal bounds: the quotient is 1.
        if (first_num == second_num) {
            return 1;
        }
        // Any run of 5+ consecutive integers contains a factor of 2 and of 5,
        // so the product ends in 0.
        if (second_num - first_num >= 5) {
            return 0;
        }
        int lastDigit = 1;
        for (int k = first_num + 1; k <= second_num; k++) {
            lastDigit = (lastDigit * (k % 10)) % 10;
        }
        return lastDigit;
    }
}
public class EbeanQueryChannelService { /** * Return an query for finding a List , Set , Map or single bean . * @ return the Query . */ public static < T > Query < T > query ( Class < T > entityType , String fetchPath , Object queryObject , Pageable pageable ) { } }
return query ( db ( ) , entityType , fetchPath , queryObject , pageable ) ;
public class TimePoint { /** * / * [ deutsch ] * < p > Bestimmt das Minimum der beiden Zeitpunkte . < / p > * @ param < U > generic type of time units compatible to { @ link ChronoUnit } * @ param < T > generic type of self reference * @ param t1 first time point * @ param t2 second time point * @ return minimum of t1 and t2 */ public static < U , T extends TimePoint < U , T > > T min ( T t1 , T t2 ) { } }
return ( t1 . compareTo ( t2 ) > 0 ) ? t2 : t1 ;
public class LocPathIterator { /** * Execute an expression in the XPath runtime context , and return the * result of the expression . * @ param xctxt The XPath runtime context . * @ param handler The target content handler . * @ return The result of the expression in the form of a < code > XObject < / code > . * @ throws javax . xml . transform . TransformerException if a runtime exception * occurs . * @ throws org . xml . sax . SAXException */ public void executeCharsToContentHandler ( XPathContext xctxt , org . xml . sax . ContentHandler handler ) throws javax . xml . transform . TransformerException , org . xml . sax . SAXException { } }
LocPathIterator clone = ( LocPathIterator ) m_clones . getInstance ( ) ; int current = xctxt . getCurrentNode ( ) ; clone . setRoot ( current , xctxt ) ; int node = clone . nextNode ( ) ; DTM dtm = clone . getDTM ( node ) ; clone . detach ( ) ; if ( node != DTM . NULL ) { dtm . dispatchCharactersEvents ( node , handler , false ) ; }
public class JCGLProfiling { /** * Trim any cached internal storage . * This is primarily useful because implementations are expected to reuse a * lot of data structures internally ( because the graph of renderers and * filters being profiled rarely changes ) . */ @ Override public void trimContexts ( ) { } }
for ( int index = 0 ; index < this . frames . length ; ++ index ) { final Frame f = this . frames [ index ] ; f . trimRecursive ( ) ; } this . frame_index = 0 ;
public class SarlBehaviorImpl { /** * < ! - - begin - user - doc - - > * < ! - - end - user - doc - - > * @ generated */ @ Override public void eUnset ( int featureID ) { } }
switch ( featureID ) { case SarlPackage . SARL_BEHAVIOR__EXTENDS : setExtends ( ( JvmParameterizedTypeReference ) null ) ; return ; } super . eUnset ( featureID ) ;
public class Client { /** * Writes a report file including the results of the * { @ link DumpProcessingAction # getReport ( ) } methods . If there is no report * filename specified the reports will be logged . * @ throws IOException */ void writeReport ( ) throws IOException { } }
StringBuilder builder = new StringBuilder ( ) ; for ( DumpProcessingAction action : this . clientConfiguration . getActions ( ) ) { if ( this . clientConfiguration . getReportFileName ( ) != null ) { builder . append ( action . getActionName ( ) ) ; builder . append ( ": " ) ; if ( action . isReady ( ) ) { builder . append ( action . getReport ( ) ) ; } else { builder . append ( "Action was not executed." ) ; } builder . append ( System . getProperty ( "line.separator" ) ) ; } else { logger . info ( action . getActionName ( ) + ": " + action . getReport ( ) ) ; } } if ( this . clientConfiguration . getReportFileName ( ) != null ) { Path outputDirectory = Paths . get ( this . clientConfiguration . getReportFileName ( ) ) . getParent ( ) ; if ( outputDirectory == null ) { outputDirectory = Paths . get ( "." ) ; } DirectoryManager dm = DirectoryManagerFactory . createDirectoryManager ( outputDirectory , false ) ; OutputStream out = dm . getOutputStreamForFile ( Paths . get ( this . clientConfiguration . getReportFileName ( ) ) . getFileName ( ) . toString ( ) ) ; out . write ( builder . toString ( ) . getBytes ( StandardCharsets . UTF_8 ) ) ; out . close ( ) ; }
public class LayerReducerUtil { /** * Compute soil layer thickness * @ param soilsData * @ return */ public static ArrayList < HashMap < String , String > > computeSoilLayerSize ( ArrayList < HashMap < String , String > > soilsData ) { } }
float deep = 0.0f ; ArrayList < HashMap < String , String > > newSoilsData ; newSoilsData = new ArrayList < HashMap < String , String > > ( ) ; for ( HashMap < String , String > currentSoil : soilsData ) { // create a new soil with reference parameters HashMap < String , String > newCurrentSoil = new HashMap < String , String > ( currentSoil ) ; // Specific for stics soil data representation newCurrentSoil . put ( LayerReducer . SLLB , new Float ( parseFloat ( currentSoil . get ( LayerReducer . SLLB ) ) - deep ) . toString ( ) ) ; deep = parseFloat ( currentSoil . get ( LayerReducer . SLLB ) ) ; newSoilsData . add ( newCurrentSoil ) ; } return newSoilsData ;
public class HTTPInputStream {

    /**
     * Opens a plain TCP socket to the given host and port using the default
     * socket factory. Subclasses should override this to customise transport.
     */
    protected Socket openSocket(String host, int port) throws IOException {
        SocketFactory factory = SocketFactory.getDefault();
        return factory.createSocket(host, port);
    }
}
public class XmlUtils { /** * Converts the string value to the java object for the given primitive category * @ param value * the value * @ param primitiveCategory * the primitive category * @ return the java object */ public static Object getPrimitiveValue ( String value , PrimitiveCategory primitiveCategory ) { } }
if ( value != null ) { try { switch ( primitiveCategory ) { case BOOLEAN : return Boolean . valueOf ( value ) ; case BYTE : return Byte . valueOf ( value ) ; case DOUBLE : return Double . valueOf ( value ) ; case FLOAT : return Float . valueOf ( value ) ; case INT : return Integer . valueOf ( value ) ; case LONG : return Long . valueOf ( value ) ; case SHORT : return Short . valueOf ( value ) ; case STRING : return value ; default : throw new IllegalStateException ( primitiveCategory . toString ( ) ) ; } } catch ( Exception ignored ) { } } return null ;
public class AbstractCxxPublicApiVisitor { /** * Find documentation node , associated documentation , identifier of a * < em > public < / em > member declarator and visit it as a public API . * @ param node the < em > public < / em > member declarator to visit */ private void visitMemberDeclarator ( AstNode node ) { } }
if ( isOverriddenMethod ( node ) ) { // assume that ancestor method is documented // and do not count as public API return ; } AstNode container = node . getFirstAncestor ( CxxGrammarImpl . templateDeclaration , CxxGrammarImpl . classSpecifier ) ; AstNode docNode = node ; List < Token > comments ; if ( container == null || container . getType ( ) . equals ( CxxGrammarImpl . classSpecifier ) ) { comments = getBlockDocumentation ( docNode ) ; } else { // template do { comments = getBlockDocumentation ( container ) ; if ( ! comments . isEmpty ( ) ) { break ; } container = container . getFirstAncestor ( CxxGrammarImpl . templateDeclaration ) ; } while ( container != null ) ; } // documentation may be inlined if ( comments . isEmpty ( ) ) { comments = getDeclaratorInlineComment ( node ) ; } // find the identifier to present to concrete visitors String id = null ; // first look for an operator function id AstNode idNode = node . getFirstDescendant ( CxxGrammarImpl . operatorFunctionId ) ; if ( idNode != null ) { id = getOperatorId ( idNode ) ; } else { // look for a declarator id idNode = node . getFirstDescendant ( CxxGrammarImpl . declaratorId ) ; if ( idNode != null ) { id = idNode . getTokenValue ( ) ; } else { // look for an identifier ( e . g in bitfield declaration ) idNode = node . getFirstDescendant ( GenericTokenType . IDENTIFIER ) ; if ( idNode != null ) { id = idNode . getTokenValue ( ) ; } else { LOG . error ( "Unsupported declarator at {}" , node . getTokenLine ( ) ) ; } } } if ( idNode != null && id != null ) { visitPublicApi ( idNode , id , comments ) ; }
public class StyledNamingConvention { @ Override public String fromActionNameToPath ( final String actionName ) { } }
if ( ! actionName . endsWith ( actionSuffix ) ) { throw new IllegalArgumentException ( actionName ) ; } String name = actionName . substring ( 0 , actionName . length ( ) - actionSuffix . length ( ) ) ; return adjustViewRootPath ( ) + "/" + name . replace ( PACKAGE_SEPARATOR , '/' ) + viewExtension ;
public class FastProtocolRegister { /** * Tries to get a method given the id . * Returns null if no such method is registered . */ public static Method tryGetMethod ( String id ) { } }
if ( id . length ( ) != NAME_LEN ) { // we use it to fast discard the request without doing map lookup return null ; } return idToMethod . get ( id ) ;
public class JvmTypeConstraintImpl { /** * < ! - - begin - user - doc - - > * < ! - - end - user - doc - - > * @ generated */ @ Override public NotificationChain eInverseRemove ( InternalEObject otherEnd , int featureID , NotificationChain msgs ) { } }
switch ( featureID ) { case TypesPackage . JVM_TYPE_CONSTRAINT__TYPE_REFERENCE : return basicSetTypeReference ( null , msgs ) ; case TypesPackage . JVM_TYPE_CONSTRAINT__OWNER : return basicSetOwner ( null , msgs ) ; } return super . eInverseRemove ( otherEnd , featureID , msgs ) ;
public class DirectCompilerVisitor { /** * NOTE : technically this rule of the grammar does not have an equivalent Java expression ( or a valid FEEL expression ) per - se . * Using here as assuming if this grammar rule trigger , it is intended as a List , either to be returned , or re - used internally in this visitor . */ @ Override public DirectCompilerResult visitExpressionList ( FEEL_1_1Parser . ExpressionListContext ctx ) { } }
List < DirectCompilerResult > exprs = new ArrayList < > ( ) ; for ( int i = 0 ; i < ctx . getChildCount ( ) ; i ++ ) { if ( ctx . getChild ( i ) instanceof FEEL_1_1Parser . ExpressionContext ) { FEEL_1_1Parser . ExpressionContext childCtx = ( FEEL_1_1Parser . ExpressionContext ) ctx . getChild ( i ) ; DirectCompilerResult child = visit ( childCtx ) ; exprs . add ( child ) ; } } MethodCallExpr list = new MethodCallExpr ( null , "list" ) ; exprs . stream ( ) . map ( DirectCompilerResult :: getExpression ) . forEach ( list :: addArgument ) ; return DirectCompilerResult . of ( list , BuiltInType . LIST , DirectCompilerResult . mergeFDs ( exprs . toArray ( new DirectCompilerResult [ ] { } ) ) ) ;
public class BigtableInstanceAdminClient { /** * Lists all clusters in the specified instance . * < p > This method will throw a { @ link PartialListClustersException } when any zone is unavailable . * If partial listing are ok , the exception can be caught and inspected . * < p > Sample code : * < pre > { @ code * try { * List < Cluster > clusters = client . listClusters ( " my - instance " ) ; * } catch ( PartialListClustersException e ) { * System . out . println ( " The following zones are unavailable : " + e . getUnavailableZones ( ) ) ; * System . out . println ( " But the following clusters are reachable : " + e . getClusters ( ) ) * } < / pre > */ @ SuppressWarnings ( "WeakerAccess" ) public List < Cluster > listClusters ( String instanceId ) { } }
return ApiExceptions . callAndTranslateApiException ( listClustersAsync ( instanceId ) ) ;
public class HealthChecker { /** * Returns information about a dependency , including the result of checking its health . */ public HealthDependencyDto doDependencyAvailabilityCheck ( String name ) { } }
HealthDependency dependency = healthDependencies . get ( checkNotNull ( name ) ) ; if ( dependency == null ) { throw new WebApplicationException ( Response . status ( 404 ) . build ( ) ) ; } return checkDependencyHealth ( dependency ) ;
public class DescribeScalableTargetsRequest { /** * The identifier of the resource associated with the scalable target . This string consists of the resource type and * unique identifier . If you specify a scalable dimension , you must also specify a resource ID . * < ul > * < li > * ECS service - The resource type is < code > service < / code > and the unique identifier is the cluster name and service * name . Example : < code > service / default / sample - webapp < / code > . * < / li > * < li > * Spot fleet request - The resource type is < code > spot - fleet - request < / code > and the unique identifier is the Spot * fleet request ID . Example : < code > spot - fleet - request / sfr - 73fbd2ce - aa30-494c - 8788-1cee4EXAMPLE < / code > . * < / li > * < li > * EMR cluster - The resource type is < code > instancegroup < / code > and the unique identifier is the cluster ID and * instance group ID . Example : < code > instancegroup / j - 2EEZNYKUA1NTV / ig - 1791Y4E1L8YI0 < / code > . * < / li > * < li > * AppStream 2.0 fleet - The resource type is < code > fleet < / code > and the unique identifier is the fleet name . * Example : < code > fleet / sample - fleet < / code > . * < / li > * < li > * DynamoDB table - The resource type is < code > table < / code > and the unique identifier is the resource ID . Example : * < code > table / my - table < / code > . * < / li > * < li > * DynamoDB global secondary index - The resource type is < code > index < / code > and the unique identifier is the * resource ID . Example : < code > table / my - table / index / my - table - index < / code > . * < / li > * < li > * Aurora DB cluster - The resource type is < code > cluster < / code > and the unique identifier is the cluster name . * Example : < code > cluster : my - db - cluster < / code > . * < / li > * < li > * Amazon SageMaker endpoint variants - The resource type is < code > variant < / code > and the unique identifier is the * resource ID . 
Example : < code > endpoint / my - end - point / variant / KMeansClustering < / code > . * < / li > * < li > * Custom resources are not supported with a resource type . This parameter must specify the < code > OutputValue < / code > * from the CloudFormation template stack used to access the resources . The unique identifier is defined by the * service provider . More information is available in our < a * href = " https : / / github . com / aws / aws - auto - scaling - custom - resource " > GitHub repository < / a > . * < / li > * < / ul > * < b > NOTE : < / b > This method appends the values to the existing list ( if any ) . Use * { @ link # setResourceIds ( java . util . Collection ) } or { @ link # withResourceIds ( java . util . Collection ) } if you want to * override the existing values . * @ param resourceIds * The identifier of the resource associated with the scalable target . This string consists of the resource * type and unique identifier . If you specify a scalable dimension , you must also specify a resource ID . < / p > * < ul > * < li > * ECS service - The resource type is < code > service < / code > and the unique identifier is the cluster name and * service name . Example : < code > service / default / sample - webapp < / code > . * < / li > * < li > * Spot fleet request - The resource type is < code > spot - fleet - request < / code > and the unique identifier is the * Spot fleet request ID . Example : < code > spot - fleet - request / sfr - 73fbd2ce - aa30-494c - 8788-1cee4EXAMPLE < / code > . * < / li > * < li > * EMR cluster - The resource type is < code > instancegroup < / code > and the unique identifier is the cluster ID * and instance group ID . Example : < code > instancegroup / j - 2EEZNYKUA1NTV / ig - 1791Y4E1L8YI0 < / code > . * < / li > * < li > * AppStream 2.0 fleet - The resource type is < code > fleet < / code > and the unique identifier is the fleet name . * Example : < code > fleet / sample - fleet < / code > . 
* < / li > * < li > * DynamoDB table - The resource type is < code > table < / code > and the unique identifier is the resource ID . * Example : < code > table / my - table < / code > . * < / li > * < li > * DynamoDB global secondary index - The resource type is < code > index < / code > and the unique identifier is the * resource ID . Example : < code > table / my - table / index / my - table - index < / code > . * < / li > * < li > * Aurora DB cluster - The resource type is < code > cluster < / code > and the unique identifier is the cluster * name . Example : < code > cluster : my - db - cluster < / code > . * < / li > * < li > * Amazon SageMaker endpoint variants - The resource type is < code > variant < / code > and the unique identifier * is the resource ID . Example : < code > endpoint / my - end - point / variant / KMeansClustering < / code > . * < / li > * < li > * Custom resources are not supported with a resource type . This parameter must specify the * < code > OutputValue < / code > from the CloudFormation template stack used to access the resources . The unique * identifier is defined by the service provider . More information is available in our < a * href = " https : / / github . com / aws / aws - auto - scaling - custom - resource " > GitHub repository < / a > . * < / li > * @ return Returns a reference to this object so that method calls can be chained together . */ public DescribeScalableTargetsRequest withResourceIds ( String ... resourceIds ) { } }
if ( this . resourceIds == null ) { setResourceIds ( new java . util . ArrayList < String > ( resourceIds . length ) ) ; } for ( String ele : resourceIds ) { this . resourceIds . add ( ele ) ; } return this ;
public class DiscordApiImpl { /** * Removes a channel from the cache . * @ param channelId The id of the channel to remove . */ public void removeChannelFromCache ( long channelId ) { } }
channels . computeIfPresent ( channelId , ( key , channel ) -> { if ( channel instanceof Cleanupable ) { ( ( Cleanupable ) channel ) . cleanup ( ) ; } return null ; } ) ;
public class ChallengeCache { /** * Gets authority of a url . * @ param url * the url to get the authority for . * @ return the authority . */ public String getAuthority ( HttpUrl url ) { } }
String scheme = url . scheme ( ) ; String host = url . host ( ) ; int port = url . port ( ) ; StringBuilder builder = new StringBuilder ( ) ; if ( scheme != null ) { builder . append ( scheme ) . append ( "://" ) ; } builder . append ( host ) ; if ( port >= 0 ) { builder . append ( ':' ) . append ( port ) ; } return builder . toString ( ) ;
public class DefaultSystemStateRestorer { /** * Reads all journals and restores all previous system mode state from them * into the given repository . Also , re - publish all events that were not sent * by the reason of failure , abortion , etc . * @ param repository into which latest state of model will be loaded * @ return information about last system state , such as last transactionId , etc . */ @ Override public SystemState restore ( long fromVersion , TxRepository repository ) { } }
workflowContext . repository ( repository ) ; final long snapshotTransactionId = repository . getO ( SystemInfo . class , 0L ) . orElse ( new SystemInfo ( 0L ) ) . lastTransactionId ; final long [ ] transactionId = { snapshotTransactionId } ; try ( InputProcessor processor = new DefaultInputProcessor ( journalStorage ) ) { processor . process ( fromVersion , b -> { EventsCommitInfo e = eventsContext . serializer ( ) . deserialize ( eventsContext . eventsCommitBuilder ( ) , b ) ; eventBus . processNextEvent ( e ) ; } , JournalType . EVENTS ) ; processor . process ( fromVersion , b -> { TransactionCommitInfo tx = workflowContext . serializer ( ) . deserialize ( workflowContext . transactionCommitBuilder ( ) , b ) ; if ( tx . transactionId ( ) > transactionId [ 0 ] || tx . transactionId ( ) == snapshotTransactionId ) { transactionId [ 0 ] = tx . transactionId ( ) ; workflowEngine . getPipe ( ) . executeRestore ( eventBus , tx ) ; } else if ( LOG . isDebugEnabled ( ) ) { LOG . debug ( "Transaction ID {} less than last Transaction ID {}" , tx . transactionId ( ) , transactionId [ 0 ] ) ; } } , JournalType . TRANSACTIONS ) ; } catch ( Throwable t ) { LOG . error ( "restore" , t ) ; throw new RuntimeException ( t ) ; } workflowEngine . getPipe ( ) . sync ( ) ; workflowContext . eventPublisher ( ) . getPipe ( ) . sync ( ) ; return new SystemState ( transactionId [ 0 ] ) ;
public class LoggerWrapper { /** * Log a DOM node at the FINER level * @ param msg The message to show with the node , or null if no message needed * @ param node * @ see Node */ public void logDomNode ( String msg , Node node ) { } }
StackTraceElement caller = StackTraceUtils . getCallerStackTraceElement ( ) ; logDomNode ( msg , node , Level . FINER , caller ) ;
public class EntryWrappingInterceptor { /** * Locks the value for the keys accessed by the command to avoid being override from a remote get . */ protected Object setSkipRemoteGetsAndInvokeNextForManyEntriesCommand ( InvocationContext ctx , WriteCommand command ) { } }
return invokeNextThenApply ( ctx , command , applyAndFixVersionForMany ) ;
public class AfplibFactoryImpl { /** * < ! - - begin - user - doc - - > * < ! - - end - user - doc - - > * @ generated */ public String convertSTCPRECSIONToString ( EDataType eDataType , Object instanceValue ) { } }
return instanceValue == null ? null : instanceValue . toString ( ) ;
public class VoiceMarshaller { /** * Marshall the given parameter object . */ public void marshall ( Voice voice , ProtocolMarshaller protocolMarshaller ) { } }
if ( voice == null ) { throw new SdkClientException ( "Invalid argument passed to marshall(...)" ) ; } try { protocolMarshaller . marshall ( voice . getGender ( ) , GENDER_BINDING ) ; protocolMarshaller . marshall ( voice . getId ( ) , ID_BINDING ) ; protocolMarshaller . marshall ( voice . getLanguageCode ( ) , LANGUAGECODE_BINDING ) ; protocolMarshaller . marshall ( voice . getLanguageName ( ) , LANGUAGENAME_BINDING ) ; protocolMarshaller . marshall ( voice . getName ( ) , NAME_BINDING ) ; protocolMarshaller . marshall ( voice . getAdditionalLanguageCodes ( ) , ADDITIONALLANGUAGECODES_BINDING ) ; } catch ( Exception e ) { throw new SdkClientException ( "Unable to marshall request to JSON: " + e . getMessage ( ) , e ) ; }
public class JsiiEngine { /** * Given an uninitialized native object instance , reads the @ Jsii annotations to determine * the jsii module and FQN , and creates a JS object . * Any methods implemented on the native object are passed in as " overrides " , which are overridden * in the javascript side to call - back to the native method . * @ param uninitializedNativeObject An uninitialized native object * @ param args Initializer arguments * @ return An object reference for the new object . */ public JsiiObjectRef createNewObject ( final Object uninitializedNativeObject , final Object ... args ) { } }
Class < ? extends Object > klass = uninitializedNativeObject . getClass ( ) ; Jsii jsii = tryGetJsiiAnnotation ( klass , true ) ; String fqn = "Object" ; // if we can ' t determine FQN , we just create an empty JS object if ( jsii != null ) { fqn = jsii . fqn ( ) ; loadModule ( jsii . module ( ) ) ; } Collection < JsiiOverride > overrides = discoverOverrides ( klass ) ; JsiiObjectRef objRef = this . getClient ( ) . createObject ( fqn , Arrays . asList ( args ) , overrides ) ; registerObject ( objRef , uninitializedNativeObject ) ; return objRef ;
public class BigQuerySnippets {

    /**
     * Looks up a job by name, e.g. "my_job_name".
     *
     * @return the job, or null when no job with that id exists
     */
    public Job getJobFromId(String jobName) {
        JobId jobId = JobId.of(jobName);
        // getJob returns null when the job was not found.
        Job job = bigquery.getJob(jobId);
        return job;
    }
}
public class ST_SunPosition { /** * Return the sun position for a given date * @ param point * @ param date * @ return * @ throws IllegalArgumentException */ public static Geometry sunPosition ( Geometry point , Date date ) throws IllegalArgumentException { } }
if ( point == null ) { return null ; } if ( point instanceof Point ) { Coordinate coord = point . getCoordinate ( ) ; return point . getFactory ( ) . createPoint ( SunCalc . getPosition ( date , coord . y , coord . x ) ) ; } else { throw new IllegalArgumentException ( "The sun position is computed according a point location." ) ; }
public class PEMKeyStore { /** * Load the keystore from the supplied input stream . Unlike many other * implementations of keystore ( most notably the default JKS * implementation ) , the input stream does not hold the keystore objects . * Instead , it must be a properties file defining the locations of the * keystore objects . The password is not used . * @ param inputStream * An input stream to the properties file . * @ param chars * The password is not used . * @ throws IOException * @ throws NoSuchAlgorithmException * @ throws CertificateException */ @ Override public void engineLoad ( InputStream inputStream , char [ ] chars ) throws IOException , NoSuchAlgorithmException , CertificateException { } }
try { Properties properties = new Properties ( ) ; if ( inputStream != null ) { properties . load ( inputStream ) ; if ( properties . size ( ) == 0 ) { throw new CertificateException ( "Properties file for configuration was empty?" ) ; } } else { if ( chars == null ) { // keyStore . load ( null , null ) - > in memory only keystore inMemoryOnly = true ; } } String defaultDirectoryString = properties . getProperty ( DEFAULT_DIRECTORY_KEY ) ; String directoryListString = properties . getProperty ( DIRECTORY_LIST_KEY ) ; String proxyFilename = properties . getProperty ( PROXY_FILENAME ) ; String certFilename = properties . getProperty ( CERTIFICATE_FILENAME ) ; String keyFilename = properties . getProperty ( KEY_FILENAME ) ; initialize ( defaultDirectoryString , directoryListString , proxyFilename , certFilename , keyFilename ) ; } finally { if ( inputStream != null ) { try { inputStream . close ( ) ; } catch ( IOException e ) { logger . info ( "Error closing inputStream" , e ) ; } } }
public class MultiMap { /** * Add values to multi valued entry . If the entry is single valued , it is * converted to the first value of a multi valued entry . * @ param name The entry key . * @ param values The List of multiple values . */ public void addValues ( String name , List < V > values ) { } }
List < V > lo = get ( name ) ; if ( lo == null ) { lo = new ArrayList < > ( ) ; } lo . addAll ( values ) ; put ( name , lo ) ;
public class VirtualNetworkLinksInner { /** * Updates a virtual network link to the specified Private DNS zone . * @ param resourceGroupName The name of the resource group . * @ param privateZoneName The name of the Private DNS zone ( without a terminating dot ) . * @ param virtualNetworkLinkName The name of the virtual network link . * @ param parameters Parameters supplied to the Update operation . * @ param ifMatch The ETag of the virtual network link to the Private DNS zone . Omit this value to always overwrite the current virtual network link . Specify the last - seen ETag value to prevent accidentally overwriting any concurrent changes . * @ param serviceCallback the async ServiceCallback to handle successful and failed responses . * @ throws IllegalArgumentException thrown if parameters fail the validation * @ return the { @ link ServiceFuture } object */ public ServiceFuture < VirtualNetworkLinkInner > updateAsync ( String resourceGroupName , String privateZoneName , String virtualNetworkLinkName , VirtualNetworkLinkInner parameters , String ifMatch , final ServiceCallback < VirtualNetworkLinkInner > serviceCallback ) { } }
return ServiceFuture . fromResponse ( updateWithServiceResponseAsync ( resourceGroupName , privateZoneName , virtualNetworkLinkName , parameters , ifMatch ) , serviceCallback ) ;
public class CyclicCarbohydrateRecognition { /** * Determine the turns in the polygon formed of the provided coordinates . * @ param points polygon points * @ return array of turns ( left , right ) or null if a parallel line was found */ static Turn [ ] turns ( Point2d [ ] points ) { } }
final Turn [ ] turns = new Turn [ points . length ] ; // cycle of size 6 is [ 1,2,3,4,5,6 ] not closed for ( int i = 1 ; i <= points . length ; i ++ ) { Point2d prevXy = points [ i - 1 ] ; Point2d currXy = points [ i % points . length ] ; Point2d nextXy = points [ ( i + 1 ) % points . length ] ; int parity = ( int ) Math . signum ( det ( prevXy . x , prevXy . y , currXy . x , currXy . y , nextXy . x , nextXy . y ) ) ; if ( parity == 0 ) return null ; turns [ i % points . length ] = parity < 0 ? Right : Turn . Left ; } return turns ;
public class Settings { /** * Writes { @ code other } into this . If any setting is populated by this and * { @ code other } , the value and flags from { @ code other } will be kept . */ void merge ( Settings other ) { } }
for ( int i = 0 ; i < COUNT ; i ++ ) { if ( ! other . isSet ( i ) ) continue ; set ( i , other . flags ( i ) , other . get ( i ) ) ; }
public class QuerySources { /** * Obtain a { @ link NodeSequence } that uses the supplied index to find the node that satisfy the given constraints . * @ param index the index ; may not be null * @ param cardinalityEstimate an estimation for the cardinality of that index , as returned during the planning phase * @ param constraints the constraints that apply to the index ; may not be null but can be empty * @ param joinConditions the join constraints that apply to the index ; may not be but can be empty * @ param variables the immutable map of variable values keyed by their name ; never null but possibly empty * @ param parameters the provider - specific index parameters ; may not be null , but may be empty * @ param valueFactories the value factories ; never null * @ param batchSize the ideal number of nodes that are to be included in each batch ; always positive * @ return the sequence of nodes ; null if the index cannot be used ( e . g . , it might be rebuilding or in an inconsistent state ) */ public NodeSequence fromIndex ( final Index index , final long cardinalityEstimate , final Collection < Constraint > constraints , final Collection < JoinCondition > joinConditions , final Map < String , Object > variables , final Map < String , Object > parameters , final ValueFactories valueFactories , final int batchSize ) { } }
if ( ! index . isEnabled ( ) ) { return null ; } final IndexConstraints indexConstraints = new IndexConstraints ( ) { @ Override public boolean hasConstraints ( ) { return ! constraints . isEmpty ( ) ; } @ Override public Collection < Constraint > getConstraints ( ) { return constraints ; } @ Override public Map < String , Object > getVariables ( ) { return variables ; } @ Override public ValueFactories getValueFactories ( ) { return valueFactories ; } @ Override public Map < String , Object > getParameters ( ) { return parameters ; } @ Override public Collection < JoinCondition > getJoinConditions ( ) { return joinConditions ; } } ; // Return a node sequence that will lazily get the results from the index . . . return new NodeSequence ( ) { private Index . Results results ; private Filter . ResultBatch currentBatch ; private boolean more = true ; private long rowCount = 0L ; @ Override public int width ( ) { return 1 ; } @ Override public long getRowCount ( ) { if ( ! more ) return rowCount ; return - 1 ; } @ Override public boolean isEmpty ( ) { if ( rowCount > 0 ) return false ; if ( ! more ) return true ; // rowCount was not > 0 , but there are no more if ( results == null ) { // We haven ' t read anything yet , so return ' false ' always ( even if this is not the case ) // so we can delay the loading of the results until really needed . . . return false ; } readBatch ( ) ; return rowCount == 0 ; } @ Override public Batch nextBatch ( ) { if ( currentBatch == null ) { if ( ! more ) { // make sure we always close close ( ) ; return null ; } readBatch ( ) ; } Batch nextBatch = NodeSequence . batchOfKeys ( currentBatch . keys ( ) . iterator ( ) , currentBatch . scores ( ) . iterator ( ) , currentBatch . size ( ) , workspaceName , repo ) ; currentBatch = null ; return nextBatch ; } @ Override public void close ( ) { if ( results != null ) { results . 
close ( ) ; results = null ; } } protected final void readBatch ( ) { if ( currentBatch != null ) { return ; } currentBatch = getResults ( ) . getNextBatch ( batchSize ) ; more = currentBatch . hasNext ( ) ; rowCount += currentBatch . size ( ) ; } @ Override public String toString ( ) { return "(from-index " + index . getName ( ) + " with " + constraints + ")" ; } private Index . Results getResults ( ) { if ( results != null ) return results ; // Otherwise we have to initialize the results , so have the index do the filtering based upon the constraints . . . results = index . filter ( indexConstraints , cardinalityEstimate ) ; return results ; } } ;
public class LockableEntityGroupImpl { /** * Ask the service to update this group ( in the store ) , update the back - pointers of the updated * members , and force the retrieval of containing groups in case the memberships of THIS group * have changed during the time the group has been locked . */ private void primUpdateMembers ( boolean renewLock ) throws GroupsException { } }
getLockableGroupService ( ) . updateGroupMembers ( this , renewLock ) ; clearPendingUpdates ( ) ; this . invalidateInParentGroupsCache ( Collections . singleton ( ( IGroupMember ) this ) ) ;
public class CmsDefaultAppButtonProvider { /** * Creates a properly styled button for the given app , without adding a click handler or checking visibility settings . < p > * @ param appCat the app category * @ param locale the locale * @ return the button component */ public static Button createAppFolderIconButton ( I_CmsFolderAppCategory appCat , Locale locale ) { } }
return createIconButton ( appCat . getName ( locale ) , appCat . getHelpText ( locale ) , appCat . getIcon ( ) , appCat . getButtonStyle ( ) ) ;
public class CouchbaseBucket { /** * START OF SUB - DOCUMENT API * */ @ Override public LookupInBuilder lookupIn ( String docId ) { } }
AsyncLookupInBuilder asyncBuilder = asyncBucket . lookupIn ( docId ) ; return new LookupInBuilder ( asyncBuilder , kvTimeout , TIMEOUT_UNIT ) ;
public class GetClientCertificatesRequestMarshaller { /** * Marshall the given parameter object . */ public void marshall ( GetClientCertificatesRequest getClientCertificatesRequest , ProtocolMarshaller protocolMarshaller ) { } }
if ( getClientCertificatesRequest == null ) { throw new SdkClientException ( "Invalid argument passed to marshall(...)" ) ; } try { protocolMarshaller . marshall ( getClientCertificatesRequest . getPosition ( ) , POSITION_BINDING ) ; protocolMarshaller . marshall ( getClientCertificatesRequest . getLimit ( ) , LIMIT_BINDING ) ; } catch ( Exception e ) { throw new SdkClientException ( "Unable to marshall request to JSON: " + e . getMessage ( ) , e ) ; }
public class Channels { /** * Sends a { @ code " shutdownOutput " } request to the last * { @ link ChannelDownstreamHandler } in the { @ link ChannelPipeline } of * the specified { @ link Channel } . * @ param channel the channel to bind * @ return the { @ link ChannelFuture } which will be notified when the * shutdownOutput operation is done */ public static ChannelFuture shutdownOutput ( Channel channel ) { } }
ChannelFuture future = future ( channel ) ; channel . getPipeline ( ) . sendDownstream ( new DownstreamShutdownOutputEvent ( channel , future ) ) ; return future ;
public class Ifc2x3tc1PackageImpl { /** * < ! - - begin - user - doc - - > * < ! - - end - user - doc - - > * @ generated */ public EClass getIfcFlowStorageDeviceType ( ) { } }
if ( ifcFlowStorageDeviceTypeEClass == null ) { ifcFlowStorageDeviceTypeEClass = ( EClass ) EPackage . Registry . INSTANCE . getEPackage ( Ifc2x3tc1Package . eNS_URI ) . getEClassifiers ( ) . get ( 251 ) ; } return ifcFlowStorageDeviceTypeEClass ;
public class SchemaLookup { /** * Set the value of a single input . * @ param key key of input * @ param value value of input */ public void setInput ( String key , String value ) { } }
if ( getAllowedKeys ( ) != null && ! getAllowedKeys ( ) . contains ( key ) ) throw new IllegalStateException ( "The input key " + key + " is not allowed for lookups" ) ; _inputs . put ( key , value ) ;
public class AbstractServiceValidateController { /** * Handle proxy granting ticket delivery . * @ param serviceTicketId the service ticket id * @ param credential the service credential * @ return the ticket granting ticket * @ throws AuthenticationException the authentication exception * @ throws AbstractTicketException the abstract ticket exception */ public TicketGrantingTicket handleProxyGrantingTicketDelivery ( final String serviceTicketId , final Credential credential ) throws AuthenticationException , AbstractTicketException { } }
val serviceTicket = serviceValidateConfigurationContext . getCentralAuthenticationService ( ) . getTicket ( serviceTicketId , ServiceTicket . class ) ; val authenticationResult = serviceValidateConfigurationContext . getAuthenticationSystemSupport ( ) . handleAndFinalizeSingleAuthenticationTransaction ( serviceTicket . getService ( ) , credential ) ; val proxyGrantingTicketId = serviceValidateConfigurationContext . getCentralAuthenticationService ( ) . createProxyGrantingTicket ( serviceTicketId , authenticationResult ) ; LOGGER . debug ( "Generated proxy-granting ticket [{}] off of service ticket [{}] and credential [{}]" , proxyGrantingTicketId . getId ( ) , serviceTicketId , credential ) ; return proxyGrantingTicketId ;
public class FileOperations { /** * Delete an existing directory including all child objects . * @ param aDir * The directory to be deleted . May not be < code > null < / code > . * @ return A non - < code > null < / code > error code . */ @ Nonnull public static FileIOError deleteDirRecursive ( @ Nonnull final File aDir ) { } }
ValueEnforcer . notNull ( aDir , "Directory" ) ; // Non - existing directory ? if ( ! FileHelper . existsDir ( aDir ) ) return EFileIOErrorCode . SOURCE_DOES_NOT_EXIST . getAsIOError ( EFileIOOperation . DELETE_DIR_RECURSIVE , aDir ) ; if ( isExceptionOnDeleteRoot ( ) ) { // Check that we ' re not deleting the complete hard drive . . . if ( aDir . getAbsoluteFile ( ) . getParent ( ) == null ) throw new IllegalArgumentException ( "Aren't we deleting the full drive: '" + aDir . getAbsolutePath ( ) + "'" ) ; } // Is the parent directory writable ? final File aParentDir = aDir . getParentFile ( ) ; if ( aParentDir != null && ! aParentDir . canWrite ( ) ) return EFileIOErrorCode . SOURCE_PARENT_NOT_WRITABLE . getAsIOError ( EFileIOOperation . DELETE_DIR_RECURSIVE , aDir ) ; // iterate directory for ( final File aChild : FileHelper . getDirectoryContent ( aDir ) ) { // is it a file or a directory or . . . if ( aChild . isDirectory ( ) ) { // Ignore " . " and " . . " directory if ( FilenameHelper . isSystemInternalDirectory ( aChild . getName ( ) ) ) continue ; // recursive call final FileIOError eCode = deleteDirRecursive ( aChild ) ; if ( eCode . isFailure ( ) ) return eCode ; } else if ( aChild . isFile ( ) ) { // delete file final FileIOError eCode = deleteFile ( aChild ) ; if ( eCode . isFailure ( ) ) return eCode ; } else { // Neither directory no file - don ' t know how to handle return EFileIOErrorCode . OBJECT_CANNOT_BE_HANDLED . getAsIOError ( EFileIOOperation . DELETE_DIR_RECURSIVE , aChild ) ; } } // Now this directory should be empty - > delete as if empty return deleteDir ( aDir ) ;
public class DbTableAccess {
    /**
     * Allows a user to insert new data into a table, while respecting
     * the schema and capabilities of the user.
     *
     * The insert proceeds in phases: (1) verify the insert privilege,
     * (2) validate that every supplied parameter maps to a writable,
     * non-auto-increment column, (3) allocate auto-increment values,
     * (4) build and execute a parameterized INSERT statement, and
     * (5) re-read the inserted row via the auto-increment selectors so it
     * can be returned to the caller.
     *
     * @param params Map of column names to their respective values
     * @return JSON object representing the inserted object
     * @throws Exception if the privilege is missing, a parameter targets a
     *         non-writable column, the row cannot be re-selected after the
     *         insert, or any SQL error occurs
     */
    public JSONObject insert(Map<String, String> params) throws Exception {
        // Phase 1: the table schema must allow inserts for this user
        OperationAccess operationAccess = tableSchema.getInsertAccess();
        if (false == operationAccess.isAllowed()) {
            throw new Exception("Attempting to insert a record while the privilege is not allowed: "
                + tableSchema.getLogicalName() + " (" + tableSchema.getPhysicalName() + ")");
        }

        // Create a list of parameters to retrieve the row after it is
        // inserted
        List<RecordSelector> whereClauses = new Vector<RecordSelector>();

        // Create a list of all writable columns where a value is specified in
        // the parameters
        List<ColumnData> columnsWithParam = new Vector<ColumnData>();
        for (String columnName : params.keySet()) {
            ColumnData columnData = tableSchema.getColumnFromName(columnName);
            // Reject non-writable columns by nulling the reference...
            if (null != columnData && false == columnData.isWriteable()) {
                columnData = null;
            }
            // ...and auto-increment columns, whose values are generated here
            if (null != columnData && columnData.isAutoIncrementInteger()) {
                columnData = null;
            }
            if (null == columnData) {
                throw new Exception("No write access to column " + columnName + " in table "
                    + tableSchema.getLogicalName() + " (" + tableSchema.getPhysicalName() + ")");
            } else {
                columnsWithParam.add(columnData);
            }
        }

        // Get all columns that are auto fill
        List<ColumnData> autoIncrementIntegerColumns = new Vector<ColumnData>();
        for (ColumnData columnData : tableSchema.getColumns()) {
            if (columnData.isAutoIncrementInteger()) {
                autoIncrementIntegerColumns.add(columnData);
            }
        }

        // Get all columns that are assigned a value on insert
        List<ColumnData> valueAssignedColumns = new Vector<ColumnData>();
        for (ColumnData columnData : tableSchema.getColumns()) {
            if (null != columnData.getAssignValueOnInsert()) {
                valueAssignedColumns.add(columnData);
            } else if (null != columnData.getAssignVariableOnInsert()) {
                valueAssignedColumns.add(columnData);
            }
        }

        // Sort according to column name. This offers greater reusability
        // of the prepared statement.
        Collections.sort(autoIncrementIntegerColumns, new ColumnDataComparator());
        Collections.sort(columnsWithParam, new ColumnDataComparator());
        Collections.sort(valueAssignedColumns, new ColumnDataComparator());

        // Obtain all auto increment integers; each one also becomes a WHERE
        // clause used later to re-select the freshly inserted row
        List<Integer> autoIncrementIntegerValues = new Vector<Integer>();
        for (ColumnData autoIncrementIntegerColumn : autoIncrementIntegerColumns) {
            int nextValue = ColumnDataUtils.obtainNextIncrementInteger(connection, autoIncrementIntegerColumn);
            Integer value = new Integer(nextValue);
            autoIncrementIntegerValues.add(value);
            whereClauses.add(new RecordSelectorComparison(autoIncrementIntegerColumn.getColumnName(),
                RecordSelectorComparison.Comparison.EQUAL, new ExpressionConstantImpl(value.toString())));
        }

        // Create SQL command
        String sqlQuery = null;
        PreparedStatement pstmt = null;
        {
            StringWriter sw = new StringWriter();
            PrintWriter pw = new PrintWriter(sw);
            pw.print("INSERT INTO ");
            pw.print(tableSchema.getPhysicalName());
            pw.print(" (");
            // Column list: auto-increment columns, then parameter columns,
            // then assigned-on-insert columns (same order as the VALUES list)
            boolean first = true;
            for (ColumnData columnData : autoIncrementIntegerColumns) {
                if (first) {
                    first = false;
                } else {
                    pw.print(",");
                }
                pw.print(columnData.getColumnName());
            }
            for (ColumnData columnData : columnsWithParam) {
                if (first) {
                    first = false;
                } else {
                    pw.print(",");
                }
                pw.print(columnData.getColumnName());
            }
            for (ColumnData columnData : valueAssignedColumns) {
                if (first) {
                    first = false;
                } else {
                    pw.print(",");
                }
                pw.print(columnData.getColumnName());
            }
            pw.print(") VALUES (");
            first = true;
            for (ColumnData columnData : autoIncrementIntegerColumns) {
                if (first) {
                    first = false;
                } else {
                    pw.print(",");
                }
                pw.print(columnData.getInsertWildcard());
            }
            for (ColumnData columnData : columnsWithParam) {
                if (first) {
                    first = false;
                } else {
                    pw.print(",");
                }
                pw.print(columnData.getInsertWildcard());
            }
            for (ColumnData columnData : valueAssignedColumns) {
                if (first) {
                    first = false;
                } else {
                    pw.print(",");
                }
                pw.print(columnData.getInsertWildcard());
            }
            pw.print(");");
            pw.flush();
            sqlQuery = sw.toString();
            pstmt = connection.prepareStatement(sqlQuery);

            // Populate prepared statement (indices follow the column order above)
            int index = 1;
            for (Integer integerValue : autoIncrementIntegerValues) {
                pstmt.setInt(index, integerValue.intValue());
                ++index;
            }
            for (ColumnData columnData : columnsWithParam) {
                // Compute value
                String value = params.get(columnData.getColumnName());
                ColumnDataUtils.writeToPreparedStatement(pstmt, index, value, columnData.getColumnType());
                ++index;
            }
            for (ColumnData columnData : valueAssignedColumns) {
                // Fixed value takes precedence; otherwise resolve the variable
                String value = columnData.getAssignValueOnInsert();
                if (null == value && null != columnData.getAssignVariableOnInsert()) {
                    value = variables.getVariableValue(columnData.getAssignVariableOnInsert());
                }
                ColumnDataUtils.writeToPreparedStatement(pstmt, index, value, columnData.getColumnType());
                ++index;
            }
        }

        // If there are no selector, there is no point in inserting the data since
        // we will not be able to retrieve it
        if (whereClauses.size() < 1) {
            throw new Exception("Refusing to insert data since it can not be selected: " + sqlQuery);
        }

        // Execute insert
        pstmt.execute();

        // Now, we need to retrieve the object
        JSONArray array = query(whereClauses, null, null, null, null, null);

        // In INSERT, we expect only one element in array
        if (1 != array.length()) {
            throw new Exception("Expected only one element returned in an INSERT. Returned size:"
                + array.length() + " sql: " + sqlQuery);
        }
        return array.getJSONObject(0);
    }
}
public class DefaultDocWorkUnitHandler { /** * Return the description to be used for the work unit . We need to manually strip * out any inline custom javadoc tags since we don ' t those in the summary . * @ param currentWorkUnit * @ return Description to be used or the work unit . */ protected String getDescription ( final DocWorkUnit currentWorkUnit ) { } }
return Arrays . stream ( currentWorkUnit . getClassDoc ( ) . inlineTags ( ) ) . filter ( t -> getTagPrefix ( ) == null || ! t . name ( ) . startsWith ( getTagPrefix ( ) ) ) . map ( t -> t . text ( ) ) . collect ( Collectors . joining ( ) ) ;
public class CritBit64 { /** * Compares two values . * @ param v1 * @ param v2 * @ return Position of the differing bit , or - 1 if both values are equal */ private static int compare ( long v1 , long v2 ) { } }
int pos = 0 ; if ( v1 != v2 ) { long x = v1 ^ v2 ; pos += Long . numberOfLeadingZeros ( x ) ; return pos ; } return - 1 ;
public class MavenDependencyResolver { /** * when failing to read data from ' mvn dependency : tree ' output - trying to read directly from POM files */ private void collectDependenciesFromPomXml ( Set < String > bomFiles , Collection < AgentProjectInfo > projects ) { } }
MavenPomParser pomParser = new MavenPomParser ( ignorePomModules ) ; List < BomFile > bomFileList = new LinkedList < > ( ) ; HashMap < String , String > bomArtifactPathMap = new HashMap < > ( ) ; for ( String bomFileName : bomFiles ) { BomFile bomfile = pomParser . parseBomFile ( bomFileName ) ; bomFileList . add ( bomfile ) ; bomArtifactPathMap . put ( bomfile . getName ( ) , bomFileName ) ; } for ( AgentProjectInfo project : projects ) { // add dependencies from pom to the modules that didn ' t fail ( or failed partially ) String pomLocationPerProject = bomArtifactPathMap . get ( project . getCoordinates ( ) . getArtifactId ( ) ) ; if ( pomLocationPerProject != null ) { bomArtifactPathMap . remove ( project . getCoordinates ( ) . getArtifactId ( ) ) ; List < DependencyInfo > dependencyInfoList = pomParser . parseDependenciesFromPomXml ( pomLocationPerProject ) ; // making sure not to add duplication of already existing dependencies project . getDependencies ( ) . addAll ( dependencyInfoList . stream ( ) . filter ( dependencyInfo -> project . getDependencies ( ) . contains ( dependencyInfo ) == false ) . collect ( Collectors . toList ( ) ) ) ; } } for ( String artifactId : bomArtifactPathMap . keySet ( ) ) { for ( BomFile missingProject : bomFileList ) { // if project was not created due to failure add its dependencies if ( artifactId . equals ( missingProject . getName ( ) ) ) { AgentProjectInfo projectInfo = new AgentProjectInfo ( ) ; projectInfo . setCoordinates ( new Coordinates ( missingProject . getGroupId ( ) , missingProject . getName ( ) , missingProject . getVersion ( ) ) ) ; projectInfo . getDependencies ( ) . addAll ( pomParser . parseDependenciesFromPomXml ( bomArtifactPathMap . get ( missingProject . getName ( ) ) ) ) ; projects . add ( projectInfo ) ; break ; } } }
public class GrassRasterReader { /** * utility to set particular parameters */ public void setParameter ( String key , Object obj ) { } }
if ( key . equals ( "novalue" ) ) { // $ NON - NLS - 1 $ novalue = obj ; } else if ( key . equals ( "matrixtype" ) ) { // $ NON - NLS - 1 $ Integer dmtype = ( Integer ) obj ; matrixType = dmtype . intValue ( ) ; }
public class DefaultGroovyMethods { /** * Creates a spreadable map from this iterable . * @ param self an iterable * @ return a newly created SpreadMap * @ see groovy . lang . SpreadMap # SpreadMap ( java . util . List ) * @ see # toSpreadMap ( java . util . Map ) * @ since 2.4.0 */ public static SpreadMap toSpreadMap ( Iterable self ) { } }
if ( self == null ) throw new GroovyRuntimeException ( "Fail to convert Iterable to SpreadMap, because it is null." ) ; else return toSpreadMap ( asList ( self ) ) ;
public class JobRun { /** * A list of predecessors to this job run . * @ param predecessorRuns * A list of predecessors to this job run . */ public void setPredecessorRuns ( java . util . Collection < Predecessor > predecessorRuns ) { } }
if ( predecessorRuns == null ) { this . predecessorRuns = null ; return ; } this . predecessorRuns = new java . util . ArrayList < Predecessor > ( predecessorRuns ) ;
public class IntStreamEx { /** * Returns a stream consisting of the elements of this stream , additionally * performing the provided action on the last stream element when it ' s * consumed from the resulting stream . * This is an < a href = " package - summary . html # StreamOps " > intermediate * operation < / a > . * The action is called at most once . For parallel stream pipelines , it ' s * not guaranteed in which thread it will be executed , so if it modifies * shared state , it is responsible for providing the required * synchronization . * Note that the action might not be called at all if the last element is * not consumed from the input ( for example , if there ' s short - circuiting * operation downstream ) . * This method exists mainly to support debugging . * @ param action a * < a href = " package - summary . html # NonInterference " > non - interfering * < / a > action to perform on the first stream element as it is * consumed from the stream * @ return the new stream * @ since 0.6.0 */ public IntStreamEx peekLast ( IntConsumer action ) { } }
return mapLast ( x -> { action . accept ( x ) ; return x ; } ) ;
public class KamDialect { /** * { @ inheritDoc } */ @ Override public Set < KamEdge > getEdges ( KamNode sourceNode , KamNode targetNode ) { } }
return wrapEdges ( kam . getEdges ( sourceNode , targetNode ) ) ;
public class SimpleCollector { /** * Return the Child with the given labels , creating it if needed . * Must be passed the same number of labels are were passed to { @ link # labelNames } . */ public Child labels ( String ... labelValues ) { } }
if ( labelValues . length != labelNames . size ( ) ) { throw new IllegalArgumentException ( "Incorrect number of labels." ) ; } for ( String label : labelValues ) { if ( label == null ) { throw new IllegalArgumentException ( "Label cannot be null." ) ; } } List < String > key = Arrays . asList ( labelValues ) ; Child c = children . get ( key ) ; if ( c != null ) { return c ; } Child c2 = newChild ( ) ; Child tmp = children . putIfAbsent ( key , c2 ) ; return tmp == null ? c2 : tmp ;
public class JdbcUtil { /** * Safely closes resources and logs errors . * @ param conn Connection to close */ public static void close ( Connection conn ) { } }
if ( conn != null ) { try { conn . close ( ) ; } catch ( SQLException ex ) { logger . error ( "" , ex ) ; } }
public class RequestContext { /** * Renders using { @ link JsonRenderer } with { " code " : int } . * @ param code the specified code * @ return this context */ public RequestContext renderJSON ( final int code ) { } }
final JsonRenderer jsonRenderer = new JsonRenderer ( ) ; final JSONObject ret = new JSONObject ( ) . put ( Keys . CODE , code ) ; jsonRenderer . setJSONObject ( ret ) ; this . renderer = jsonRenderer ; return this ;
public class JsonObject { /** * Creates a deep copy of this element and all its children * @ since 2.8.2 */ @ Override public JsonObject deepCopy ( ) { } }
JsonObject result = new JsonObject ( ) ; for ( Map . Entry < String , JsonElement > entry : members . entrySet ( ) ) { result . add ( entry . getKey ( ) , entry . getValue ( ) . deepCopy ( ) ) ; } return result ;
public class UpdateHealthCheckRequest { /** * A complex type that contains one < code > ResettableElementName < / code > element for each element that you want to * reset to the default value . Valid values for < code > ResettableElementName < / code > include the following : * < ul > * < li > * < code > ChildHealthChecks < / code > : Amazon Route 53 resets < a href = * " https : / / docs . aws . amazon . com / Route53 / latest / APIReference / API _ HealthCheckConfig . html # Route53 - Type - HealthCheckConfig - ChildHealthChecks " * > ChildHealthChecks < / a > to null . * < / li > * < li > * < code > FullyQualifiedDomainName < / code > : Route 53 resets < a href = * " https : / / docs . aws . amazon . com / Route53 / latest / APIReference / API _ UpdateHealthCheck . html # Route53 - UpdateHealthCheck - request - FullyQualifiedDomainName " * > FullyQualifiedDomainName < / a > . to null . * < / li > * < li > * < code > Regions < / code > : Route 53 resets the < a href = * " https : / / docs . aws . amazon . com / Route53 / latest / APIReference / API _ HealthCheckConfig . html # Route53 - Type - HealthCheckConfig - Regions " * > Regions < / a > list to the default set of regions . * < / li > * < li > * < code > ResourcePath < / code > : Route 53 resets < a href = * " https : / / docs . aws . amazon . com / Route53 / latest / APIReference / API _ HealthCheckConfig . html # Route53 - Type - HealthCheckConfig - ResourcePath " * > ResourcePath < / a > to null . * < / li > * < / ul > * @ param resetElements * A complex type that contains one < code > ResettableElementName < / code > element for each element that you want * to reset to the default value . Valid values for < code > ResettableElementName < / code > include the * following : < / p > * < ul > * < li > * < code > ChildHealthChecks < / code > : Amazon Route 53 resets < a href = * " https : / / docs . aws . amazon . com / Route53 / latest / APIReference / API _ HealthCheckConfig . 
html # Route53 - Type - HealthCheckConfig - ChildHealthChecks " * > ChildHealthChecks < / a > to null . * < / li > * < li > * < code > FullyQualifiedDomainName < / code > : Route 53 resets < a href = * " https : / / docs . aws . amazon . com / Route53 / latest / APIReference / API _ UpdateHealthCheck . html # Route53 - UpdateHealthCheck - request - FullyQualifiedDomainName " * > FullyQualifiedDomainName < / a > . to null . * < / li > * < li > * < code > Regions < / code > : Route 53 resets the < a href = * " https : / / docs . aws . amazon . com / Route53 / latest / APIReference / API _ HealthCheckConfig . html # Route53 - Type - HealthCheckConfig - Regions " * > Regions < / a > list to the default set of regions . * < / li > * < li > * < code > ResourcePath < / code > : Route 53 resets < a href = * " https : / / docs . aws . amazon . com / Route53 / latest / APIReference / API _ HealthCheckConfig . html # Route53 - Type - HealthCheckConfig - ResourcePath " * > ResourcePath < / a > to null . * < / li > * @ return Returns a reference to this object so that method calls can be chained together . * @ see ResettableElementName */ public UpdateHealthCheckRequest withResetElements ( ResettableElementName ... resetElements ) { } }
com . amazonaws . internal . SdkInternalList < String > resetElementsCopy = new com . amazonaws . internal . SdkInternalList < String > ( resetElements . length ) ; for ( ResettableElementName value : resetElements ) { resetElementsCopy . add ( value . toString ( ) ) ; } if ( getResetElements ( ) == null ) { setResetElements ( resetElementsCopy ) ; } else { getResetElements ( ) . addAll ( resetElementsCopy ) ; } return this ;
public class TenantService { /** * Returns true if a tenant with the specified fname exists , otherwise false . * @ since 4.3 */ public boolean fnameExists ( final String fname ) { } }
boolean rslt = false ; // default try { final ITenant tenant = getTenantByFName ( fname ) ; rslt = tenant != null ; } catch ( IllegalArgumentException iae ) { // This exception is completely fine ; it simply // means there is no tenant with this fname . rslt = false ; } return rslt ;
public class VdmDebugPlugin { /** * Logging */ public static void log ( Throwable t ) { } }
Throwable top = t ; if ( t instanceof DebugException ) { Throwable throwable = ( ( DebugException ) t ) . getStatus ( ) . getException ( ) ; if ( throwable != null ) { top = throwable ; } } log ( new Status ( IStatus . ERROR , PLUGIN_ID , INTERNAL_ERROR , "internalErrorLoggedFromVdmDebugPlugin" + top . getMessage ( ) , top ) ) ;
public class GeneralValidator { /** * Gets the transformers transforming the output of each data provider before they are mapped to the rules . * @ return Data provider output transformers . */ public Transformer [ ] getDataProviderOutputTransformers ( ) { } }
Transformer [ ] transformers ; if ( dataProviderOutputTransformers == null ) { transformers = null ; } else { transformers = dataProviderOutputTransformers . toArray ( new Transformer [ dataProviderOutputTransformers . size ( ) ] ) ; } return transformers ;
public class OptionalInt { /** * Invokes mapping function on inner value if present . * @ param mapper mapping function * @ return an { @ code OptionalInt } with transformed value if present , * otherwise an empty { @ code OptionalInt } * @ throws NullPointerException if value is present and * { @ code mapper } is { @ code null } * @ since 1.1.3 */ @ NotNull public OptionalInt map ( @ NotNull IntUnaryOperator mapper ) { } }
if ( ! isPresent ( ) ) return empty ( ) ; return OptionalInt . of ( mapper . applyAsInt ( value ) ) ;
public class CmsPropertyDelete { /** * Builds a HTML list of Resources . < p > * Columns : Type , Name , Uri , Value of the property , locked by ( optional ) . < p > * @ param resourceList a list of resources * @ param lockInfo a boolean to decide if the locked info should be shown or not * @ throws CmsException if operation was not successful * @ return the HTML String for the Resource list */ public String buildResourceList ( List resourceList , boolean lockInfo ) throws CmsException { } }
// reverse the resource list Collections . reverse ( resourceList ) ; CmsMessages messages = Messages . get ( ) . getBundle ( getLocale ( ) ) ; StringBuffer result = new StringBuffer ( ) ; result . append ( "<table border=\"0\" width=\"100%\" cellpadding=\"1\" cellspacing=\"1\">\n" ) ; result . append ( "<tr>\n" ) ; // Type result . append ( "\t<td style=\"width:5%;\" class=\"textbold\">" ) ; result . append ( messages . key ( Messages . GUI_INPUT_TYPE_0 ) ) ; result . append ( "</td>\n" ) ; // Uri result . append ( "\t<td style=\"width:40%;\" class=\"textbold\">" ) ; result . append ( messages . key ( Messages . GUI_INPUT_ADRESS_0 ) ) ; result . append ( "</td>\n" ) ; // Name result . append ( "\t<td style=\"width:25%;\" class=\"textbold\">" ) ; result . append ( messages . key ( Messages . GUI_INPUT_TITLE_0 ) ) ; result . append ( "</td>\n" ) ; if ( ! lockInfo ) { // Property value result . append ( "\t<td style=\"width:30%;\" class=\"textbold\">" ) ; result . append ( messages . key ( Messages . GUI_INPUT_PROPERTYVALUE_0 ) ) ; result . append ( "</td>\n" ) ; } if ( lockInfo ) { // Property value result . append ( "\t<td style=\"width:30%;\" class=\"textbold\">" ) ; result . append ( messages . key ( Messages . GUI_EXPLORER_LOCKEDBY_0 ) ) ; result . append ( "</td>\n" ) ; result . append ( "</tr>\n" ) ; } result . append ( "</tr>\n" ) ; result . append ( "<tr><td colspan=\"4\"><span style=\"height: 6px;\">&nbsp;</span></td></tr>\n" ) ; String storedSiteRoot = getCms ( ) . getRequestContext ( ) . getSiteRoot ( ) ; try { getCms ( ) . getRequestContext ( ) . setSiteRoot ( "/" ) ; Iterator i = resourceList . iterator ( ) ; while ( i . hasNext ( ) ) { CmsResource resource = ( CmsResource ) i . next ( ) ; String filetype = OpenCms . getResourceManager ( ) . getResourceType ( resource . getTypeId ( ) ) . getTypeName ( ) ; result . append ( "<tr>\n" ) ; // file type result . append ( "\t<td>" ) ; result . append ( "<img src=\"" ) ; result . 
append ( getSkinUri ( ) ) ; result . append ( CmsWorkplace . RES_PATH_FILETYPES ) ; result . append ( filetype ) ; result . append ( ".gif\">" ) ; result . append ( "</td>\n" ) ; // file address result . append ( "\t<td>" ) ; result . append ( resource . getRootPath ( ) ) ; result . append ( "</td>\n" ) ; // title result . append ( "\t<td>" ) ; result . append ( getJsp ( ) . property ( CmsPropertyDefinition . PROPERTY_TITLE , resource . getRootPath ( ) , "" ) ) ; result . append ( "</td>\n" ) ; // current value of the property if ( ! lockInfo ) { result . append ( "\t<td>" ) ; result . append ( getJsp ( ) . property ( getParamPropertyName ( ) , resource . getRootPath ( ) ) ) ; result . append ( "</td>\n" ) ; } // locked by user if ( lockInfo ) { CmsLock lock = getCms ( ) . getLock ( resource ) ; result . append ( "\t<td>" ) ; result . append ( getCms ( ) . readUser ( lock . getUserId ( ) ) . getName ( ) ) ; result . append ( "</td>\n" ) ; } result . append ( "</tr>\n" ) ; } result . append ( "</table>\n" ) ; } finally { getCms ( ) . getRequestContext ( ) . setSiteRoot ( storedSiteRoot ) ; } return result . toString ( ) ;
public class ContextWindow { /** * Enables or disables the component . */ @ Override public void setEnabled ( boolean enabled ) { } }
context . setEnabled ( enabled ) ; thisTable . setEnabled ( enabled ) ; localsTable . setEnabled ( enabled ) ; evaluator . setEnabled ( enabled ) ; cmdLine . setEnabled ( enabled ) ;
public class GreenPepperLogo { /** * { @ inheritDoc } */ @ SuppressWarnings ( "unchecked" ) public String execute ( Map parameters , String body , RenderContext renderContext ) throws MacroException { } }
try { Map contextMap = MacroUtils . defaultVelocityContext ( ) ; return VelocityUtils . getRenderedTemplate ( "/templates/greenpepper/confluence/macros/greenPepperLogo.vm" , contextMap ) ; } catch ( Exception e ) { return getErrorView ( "greenpepper.logo.macroid" , e . getMessage ( ) ) ; }
public class AdminToolQuartzServiceImpl { /** * / * ( non - Javadoc ) * @ see de . chandre . admintool . quartz . AdminToolQuartzService # isPaused ( org . quartz . Trigger ) */ @ Override public boolean isPaused ( Trigger trigger ) throws SchedulerException { } }
return scheduler . getTriggerState ( trigger . getKey ( ) ) == TriggerState . PAUSED ;
public class Util {
    /**
     * Returns a hash set containing the given elements (duplicates collapse).
     *
     * @param elements the elements to put into the set
     * @return a new {@link HashSet} with the distinct elements
     */
    public static <A> Set<A> set(A... elements) {
        final Set<A> result = new HashSet<A>(elements.length);
        for (final A element : elements) {
            result.add(element);
        }
        return result;
    }
}
public class ListTriggersRequestMarshaller { /** * Marshall the given parameter object . */ public void marshall ( ListTriggersRequest listTriggersRequest , ProtocolMarshaller protocolMarshaller ) { } }
if ( listTriggersRequest == null ) { throw new SdkClientException ( "Invalid argument passed to marshall(...)" ) ; } try { protocolMarshaller . marshall ( listTriggersRequest . getNextToken ( ) , NEXTTOKEN_BINDING ) ; protocolMarshaller . marshall ( listTriggersRequest . getDependentJobName ( ) , DEPENDENTJOBNAME_BINDING ) ; protocolMarshaller . marshall ( listTriggersRequest . getMaxResults ( ) , MAXRESULTS_BINDING ) ; protocolMarshaller . marshall ( listTriggersRequest . getTags ( ) , TAGS_BINDING ) ; } catch ( Exception e ) { throw new SdkClientException ( "Unable to marshall request to JSON: " + e . getMessage ( ) , e ) ; }
public class CmsConvertXmlThread {

    /**
     * Transforms and writes files.<p>
     *
     * For each resource: decides which CmsObject/project to work in based on the
     * resource state, locks it, reads its XML content, checks the encoding and
     * whether it was already transformed, then applies the XSLT and writes the
     * result back via {@code setXmlContentFromFile}.
     *
     * @param files2Transform Files to transform
     * @param xsltFile XLST file which includes logic for transforming
     * @param cmsObject Current CmsObject
     * @param newXsdMainFile New xsd main file
     * @param report I_CmsReport
     * @return the CmsObject bound to the temporary publish project (NOTE(review):
     *         the original javadoc said "Project with files to publish", but the
     *         return type is CmsObject — callers presumably publish through it)
     * @throws CmsException Can become thrown while creating temporary OpenCms Projects
     */
    private CmsObject transformAndWriteFiles(
        List<CmsResource> files2Transform,
        String xsltFile,
        CmsObject cmsObject,
        String newXsdMainFile,
        I_CmsReport report) {

        // the CmsObject to publish resources
        CmsObject cms1 = null;
        // the CmsObject to handle resources which are not to publish
        CmsObject cms2 = null;
        // the publish project
        CmsProject project2Publish = null;
        // initialize the CmsObjects and the publish project
        try {
            cms1 = OpenCms.initCmsObject(cmsObject);
            cms2 = OpenCms.initCmsObject(cmsObject);
            cms1.copyResourceToProject("/");
            project2Publish = cms1.createTempfileProject();
            // init new: switch cms1 into the freshly created temp-file project
            cms1.getRequestContext().setCurrentProject(project2Publish);
        } catch (CmsException e) {
            report.println(Messages.get().container(Messages.RPT_CONVERTXML_INITIALIZE_CMS_ERROR_0));
            if (LOG.isErrorEnabled()) {
                LOG.error(e.toString());
            }
            // NOTE(review): cms1 may still be null here if initCmsObject itself failed
            return cms1;
        }

        // iterate over all the resources to transform
        Iterator<CmsResource> iter = files2Transform.iterator();
        while (iter.hasNext()) {
            // get the next resource to transform
            CmsResource cmsResource = iter.next();
            // check if the resource has to be published after transforming
            boolean resource2Publish = false;
            // get info if resource shall become published
            CmsResourceState cmsResourceState = cmsResource.getState();
            if (!(cmsResourceState.equals(CmsResourceState.STATE_CHANGED)
                || cmsResourceState.equals(CmsResourceState.STATE_NEW))) {
                // resource is not touched or is not new
                // NOTE(review): unchanged resources are routed to the publish
                // project; changed/new resources are handled in the project they
                // were last modified in — confirm this matches the intended flow
                resource2Publish = true;
            }
            // get current lock from file
            if (resource2Publish) {
                // lock the resource in the publish project
                try {
                    // try to lock the resource
                    if (!lockResource(cms1, project2Publish, cmsResource, report)) {
                        report.println(
                            Messages.get().container(
                                Messages.RPT_CONVERTXML_LOCKED_FILE_1,
                                cmsObject.getSitePath(cmsResource)),
                            I_CmsReport.FORMAT_ERROR);
                        continue;
                    }
                } catch (CmsException e) {
                    report.println(
                        Messages.get().container(
                            Messages.RPT_CONVERTXML_LOCKED_FILE_1,
                            cmsObject.getSitePath(cmsResource)),
                        I_CmsReport.FORMAT_ERROR);
                    if (LOG.isErrorEnabled()) {
                        LOG.error(e.getMessageContainer(), e);
                    }
                    continue;
                }
            } else {
                // lock the resource in the project where the resource was last modified in
                try {
                    // get the project id from the project where the resource is last modified in
                    CmsUUID pid = cmsResource.getProjectLastModified();
                    CmsProject fileProject = cms2.readProject(pid);
                    cms2.getRequestContext().setCurrentProject(fileProject);
                    // try to lock the resource
                    if (!lockResource(cms2, fileProject, cmsResource, report)) {
                        report.println(
                            Messages.get().container(
                                Messages.RPT_CONVERTXML_LOCKED_FILE_1,
                                cmsObject.getSitePath(cmsResource)),
                            I_CmsReport.FORMAT_ERROR);
                        continue;
                    }
                } catch (CmsException e) {
                    report.println(
                        Messages.get().container(
                            Messages.RPT_CONVERTXML_LOCKED_FILE_1,
                            cmsObject.getSitePath(cmsResource)),
                        I_CmsReport.FORMAT_ERROR);
                    if (LOG.isErrorEnabled()) {
                        LOG.error(e.getMessageContainer(), e);
                    }
                    continue;
                }
            }
            // get CmsFile object and the xml content
            CmsFile cmsFile = null;
            String fileXmlContent = "";
            try {
                cmsFile = cmsObject.readFile(cmsResource);
                CmsXmlContent xmlContent = CmsXmlContentFactory.unmarshal(getCms(), cmsFile);
                fileXmlContent = xmlContent.toString();
            } catch (CmsException e) {
                m_errorTransform += 1;
                report.println(
                    Messages.get().container(Messages.RPT_CONVERTXML_TRANSFORMATION_ERROR_0),
                    I_CmsReport.FORMAT_ERROR);
                if (LOG.isErrorEnabled()) {
                    LOG.error(e.getMessageContainer(), e);
                }
                continue;
            }
            // get encoding per resource; fall back to the system default if the
            // property cannot be read
            String encodingType = "";
            try {
                encodingType = cmsObject.readPropertyObject(
                    cmsResource.getRootPath(),
                    CmsPropertyDefinition.PROPERTY_CONTENT_ENCODING,
                    true).getValue(OpenCms.getSystemInfo().getDefaultEncoding());
            } catch (CmsException e) {
                encodingType = OpenCms.getSystemInfo().getDefaultEncoding();
            }
            // check transform conditions per resource
            // encoding type given?
            if (CmsStringUtil.isEmpty(encodingType)) {
                m_missingEncodingType += 1;
                report.println(
                    Messages.get().container(
                        Messages.RPT_CONVERTXML_MISSION_ENCODING_TYPE_1,
                        cmsResource.getRootPath()),
                    I_CmsReport.FORMAT_ERROR);
                continue;
            }
            // already transformed? (case-insensitive check whether the content
            // already references the new XSD main file)
            if (fileXmlContent.toUpperCase().contains(newXsdMainFile.toUpperCase())) {
                m_alreadyTransformed += 1;
                report.println(
                    Messages.get().container(
                        Messages.RPT_CONVERTXML_FILE_ALREADY_TRANSFORMED_1,
                        cmsResource.getRootPath()),
                    I_CmsReport.FORMAT_OK);
                continue;
            }
            // create and write the changed xml content
            try {
                String transformedXmlContent = CmsXsltUtil.transformXmlContent(cmsObject, xsltFile, fileXmlContent);
                // prepend an XML declaration carrying the resource's encoding
                transformedXmlContent = "<?xml version=\"1.0\" encoding=\"".concat(encodingType).concat(
                    "\"?>").concat(transformedXmlContent);
                // write file xml content
                if (resource2Publish) {
                    report.println(
                        Messages.get().container(
                            Messages.RPT_CONVERTXML_TRANSFORM_CURRENT_FILE_NAME2_2,
                            cmsResource.getRootPath(),
                            encodingType),
                        I_CmsReport.FORMAT_OK);
                    cms1.getRequestContext().setCurrentProject(project2Publish);
                    setXmlContentFromFile(cmsResource, cmsFile, cms1, transformedXmlContent, encodingType, report);
                } else {
                    report.println(
                        Messages.get().container(
                            Messages.RPT_CONVERTXML_TRANSFORM_CURRENT_FILE_NAME_2,
                            cmsResource.getRootPath(),
                            encodingType),
                        I_CmsReport.FORMAT_OK);
                    setXmlContentFromFile(cmsResource, cmsFile, cms2, transformedXmlContent, encodingType, report);
                }
            } catch (CmsXmlException e) {
                m_errorTransform += 1;
                report.println(
                    Messages.get().container(Messages.RPT_CONVERTXML_TRANSFORMATION_ERROR_0),
                    I_CmsReport.FORMAT_ERROR);
                if (LOG.isErrorEnabled()) {
                    LOG.error(e.getMessageContainer(), e);
                }
            } catch (CmsException e) {
                m_errorTransform += 1;
                report.println(
                    Messages.get().container(Messages.RPT_CONVERTXML_TRANSFORMATION_ERROR_0),
                    I_CmsReport.FORMAT_ERROR);
                if (LOG.isErrorEnabled()) {
                    LOG.error(e.getMessageContainer(), e);
                }
            }
        }
        return cms1;
    }
}
public class PairCounter { /** * Counts the pair , increasing its total count by the specified positive amount . * @ param count a positive value for the number of times the object occurred * @ throws IllegalArgumentException if { @ code count } is not a positive value . */ public int count ( Pair < T > obj , int count ) { } }
if ( count < 1 ) throw new IllegalArgumentException ( "Count must be positive: " + count ) ; long index = getIndex ( obj ) ; int oldCount = counts . get ( index ) ; int newCount = count + oldCount ; counts . put ( index , newCount ) ; sum += count ; return newCount ;