signature
stringlengths
43
39.1k
implementation
stringlengths
0
450k
public class AbstractJSONDocScanner { /** * Gets the API documentation for the set of classes passed as argument */ public Set < ApiDoc > getApiDocs ( Set < Class < ? > > classes , MethodDisplay displayMethodAs ) { } }
Set < ApiDoc > apiDocs = new TreeSet < ApiDoc > ( ) ; for ( Class < ? > controller : classes ) { ApiDoc apiDoc = getApiDoc ( controller , displayMethodAs ) ; apiDocs . add ( apiDoc ) ; } return apiDocs ;
public class RtfDocumentHeader { /** * Converts a HeaderFooter into a RtfHeaderFooterGroup . Depending on which class * the HeaderFooter is , the correct RtfHeaderFooterGroup is created . * @ param hf The HeaderFooter to convert . * @ param type Whether the conversion is being done on a footer or header * @ return The converted RtfHeaderFooterGroup . * @ see com . lowagie . text . rtf . headerfooter . RtfHeaderFooter * @ see com . lowagie . text . rtf . headerfooter . RtfHeaderFooterGroup */ private RtfHeaderFooterGroup convertHeaderFooter ( HeaderFooter hf , int type ) { } }
if ( hf != null ) { if ( hf instanceof RtfHeaderFooterGroup ) { return new RtfHeaderFooterGroup ( this . document , ( RtfHeaderFooterGroup ) hf , type ) ; } else if ( hf instanceof RtfHeaderFooter ) { return new RtfHeaderFooterGroup ( this . document , ( RtfHeaderFooter ) hf , type ) ; } else { return new RtfHeaderFooterGroup ( this . document , hf , type ) ; } } else { return new RtfHeaderFooterGroup ( this . document , type ) ; }
public class SelectSubPlanAssembler { /** * Pull a join order out of the join orders deque , compute all possible plans * for that join order , then append them to the computed plans deque . */ @ Override protected AbstractPlanNode nextPlan ( ) { } }
// repeat ( usually run once ) until plans are created // or no more plans can be created while ( m_plans . size ( ) == 0 ) { // get the join order for us to make plans out of JoinNode joinTree = m_joinOrders . poll ( ) ; // no more join orders = > no more plans to generate if ( joinTree == null ) { return null ; } // Analyze join and filter conditions joinTree . analyzeJoinExpressions ( m_parsedStmt . m_noTableSelectionList ) ; // a query that is a little too quirky or complicated . if ( ! m_parsedStmt . m_noTableSelectionList . isEmpty ( ) ) { throw new PlanningErrorException ( "Join with filters that do not depend on joined tables is not supported in VoltDB" ) ; } if ( ! m_partitioning . wasSpecifiedAsSingle ( ) ) { // Now that analyzeJoinExpressions has done its job of properly categorizing // and placing the various filters that the HSQL parser tends to leave in the strangest // configuration , this is the first opportunity to analyze WHERE and JOIN filters ' // effects on statement partitioning . // But this causes the analysis to be run based on a particular join order . // Which join orders does this analysis actually need to be run on ? // Can it be run on the first join order and be assumed to hold for all join orders ? // If there is a join order that fails to generate a single viable plan , is its // determination of partitioning ( or partitioning failure ) always valid for other // join orders , or should the analysis be repeated on a viable join order // in that case ? // For now , analyze each join order independently and when an invalid partitioning is // detected , skip the plan generation for that particular ordering . // If this causes all plans to be skipped , commonly the case , the PlanAssembler // should propagate an error message identifying partitioning as the problem . HashMap < AbstractExpression , Set < AbstractExpression > > valueEquivalence = joinTree . getAllEquivalenceFilters ( ) ; Collection < StmtTableScan > scans = m_parsedStmt . 
allScans ( ) ; m_partitioning . analyzeForMultiPartitionAccess ( scans , valueEquivalence ) ; if ( ! m_partitioning . isJoinValid ( ) ) { // The case of more than one independent partitioned table // would result in an illegal plan with more than two fragments . // Don ' t throw a planning error here , in case the problem is just with this // particular join order , but do leave a hint to the PlanAssembler in case // the failure is unanimous - - a common case . m_recentErrorMsg = m_partitioning . getJoinInvalidReason ( ) ; // This join order , at least , is not worth trying to plan . continue ; } } generateMorePlansForJoinTree ( joinTree ) ; } return m_plans . poll ( ) ;
public class Funcs { /** * Returns the parse { @ link Function } with given class type . * Supports : { @ link Byte } , { @ link Short } , { @ link Integer } , { @ link Long } , * { @ link Float } , { @ link Double } , { @ link BigInteger } , { @ link BigDecimal } , { @ link Date } * @ param type * @ return */ public static < O > Function < Object , O > getParseFunction ( Class < O > type ) { } }
return new StructBehavior < Function < Object , O > > ( type ) { @ SuppressWarnings ( "unchecked" ) @ Override protected Function < Object , O > booleanIf ( ) { return ( Function < Object , O > ) TO_BOOLEAN ; } @ SuppressWarnings ( "unchecked" ) @ Override protected Function < Object , O > byteIf ( ) { return ( Function < Object , O > ) TO_BYTE ; } @ SuppressWarnings ( "unchecked" ) @ Override protected Function < Object , O > characterIf ( ) { return ( Function < Object , O > ) TO_CHARACTER ; } @ SuppressWarnings ( "unchecked" ) @ Override protected Function < Object , O > doubleIf ( ) { return ( Function < Object , O > ) TO_DOUBLE ; } @ SuppressWarnings ( "unchecked" ) @ Override protected Function < Object , O > floatIf ( ) { return ( Function < Object , O > ) TO_FLOAT ; } @ SuppressWarnings ( "unchecked" ) @ Override protected Function < Object , O > integerIf ( ) { return ( Function < Object , O > ) TO_INTEGER ; } @ SuppressWarnings ( "unchecked" ) @ Override protected Function < Object , O > longIf ( ) { return ( Function < Object , O > ) TO_LONG ; } @ SuppressWarnings ( "unchecked" ) @ Override protected Function < Object , O > shortIf ( ) { return ( Function < Object , O > ) TO_SHORT ; } @ Override protected Function < Object , O > nullIf ( ) { return null ; } @ Override protected Function < Object , O > noneMatched ( ) { return new ValueBehaviorAdapter < Function < Object , O > > ( delegate ) { @ SuppressWarnings ( "unchecked" ) @ Override protected Function < Object , O > dateIf ( Date resolvedP ) { return ( Function < Object , O > ) TO_DATE ; } @ SuppressWarnings ( "unchecked" ) @ Override protected Function < Object , O > bigDecimalIf ( BigDecimal resolvedP ) { return ( Function < Object , O > ) TO_BIGDECIMAL ; } @ SuppressWarnings ( "unchecked" ) @ Override protected Function < Object , O > bigIntegerIf ( BigInteger resolvedP ) { return ( Function < Object , O > ) TO_BIGINTEGER ; } @ Override protected Function < Object , O > defaultBehavior ( ) { 
return null ; } } . doDetect ( ) ; } } . doDetect ( ) ;
public class BinaryHashBucketArea { /** * Append record and insert to bucket . */ boolean appendRecordAndInsert ( BinaryRow record , int hashCode ) throws IOException { } }
final int posHashCode = findBucket ( hashCode ) ; // get the bucket for the given hash code final int bucketArrayPos = posHashCode >> table . bucketsPerSegmentBits ; final int bucketInSegmentPos = ( posHashCode & table . bucketsPerSegmentMask ) << BUCKET_SIZE_BITS ; final MemorySegment bucket = this . buckets [ bucketArrayPos ] ; if ( ! table . tryDistinctBuildRow || ! partition . isInMemory ( ) || ! findFirstSameBuildRow ( bucket , hashCode , bucketInSegmentPos , record ) ) { int pointer = partition . insertIntoBuildBuffer ( record ) ; if ( pointer != - 1 ) { // record was inserted into an in - memory partition . a pointer must be inserted into the buckets insertToBucket ( bucket , bucketInSegmentPos , hashCode , pointer , true , true ) ; return true ; } else { return false ; } } else { // distinct build rows in memory . return true ; }
public class RawResponse { /** * If decompress http response body . Default is true . */ public RawResponse decompress ( boolean decompress ) { } }
return new RawResponse ( method , url , statusCode , statusLine , cookies , headers , body , charset , decompress ) ;
public class DERUniversalString { /** * UniversalStrings have characters which are 4 bytes long - for the * moment we just return them in Hex . . . */ public String getString ( ) { } }
StringBuffer buf = new StringBuffer ( ) ; for ( int i = 0 ; i != string . length ; i ++ ) { buf . append ( table [ ( string [ i ] >>> 4 ) % 0xf ] ) ; buf . append ( table [ string [ i ] & 0xf ] ) ; } return buf . toString ( ) ;
public class SeverityClassificationPulldownAction { /** * Set the menu to given severity level . * @ param severity * the severity level ( 1 . . 5) */ private void selectSeverity ( int severity ) { } }
// Severity is 1 - based , but the menu item list is 0 - based int index = severity - 1 ; for ( int i = 0 ; i < severityItemList . length ; ++ i ) { MenuItem menuItem = severityItemList [ i ] ; menuItem . setEnabled ( true ) ; menuItem . setSelection ( i == index ) ; }
public class Javalin { /** * Adds a PATCH request handler with the given roles for the specified path to the instance . * Requires an access manager to be set on the instance . * @ see AccessManager * @ see < a href = " https : / / javalin . io / documentation # handlers " > Handlers in docs < / a > */ public Javalin patch ( @ NotNull String path , @ NotNull Handler handler , @ NotNull Set < Role > permittedRoles ) { } }
return addHandler ( HandlerType . PATCH , path , handler , permittedRoles ) ;
public class GitlabAPI { /** * Get the comments of a commit * @ param projectId ( required ) - The ID of a project * @ param sha ( required ) - The name of a repository branch or tag or if not given the default branch * @ return A CommitComment * @ throws IOException on gitlab api call error * @ see < a href = " http : / / doc . gitlab . com / ce / api / commits . html # post - comment - to - commit " > http : / / doc . gitlab . com / ce / api / commits . html # post - comment - to - commit < / a > */ public List < CommitComment > getCommitComments ( Integer projectId , String sha ) throws IOException { } }
String tailUrl = GitlabProject . URL + "/" + sanitizeProjectId ( projectId ) + "/repository/commits/" + sha + CommitComment . URL ; return Arrays . asList ( retrieve ( ) . to ( tailUrl , CommitComment [ ] . class ) ) ;
public class ValidatorFactoryMaker { /** * Unbind reference added automatically from addFactory annotation */ @ SuppressWarnings ( "javadoc" ) public void removeFactory ( ValidatorFactory factory ) { } }
// this is to avoid adding items to the cache that were removed while // iterating synchronized ( map ) { providers . remove ( factory ) ; map . clear ( ) ; }
public class ProviderConfig { /** * Load and instantiate the Provider described by this class . * NOTE use of doPrivileged ( ) . * @ return null if the Provider could not be loaded * @ throws ProviderException if executing the Provider ' s constructor * throws a ProviderException . All other Exceptions are ignored . */ private Provider doLoadProvider ( ) { } }
return AccessController . doPrivileged ( new PrivilegedAction < Provider > ( ) { public Provider run ( ) { // if ( debug ! = null ) { // debug . println ( " Loading provider : " + ProviderConfig . this ) ; try { // First try with the boot classloader . return initProvider ( className , Object . class . getClassLoader ( ) ) ; } catch ( Exception e1 ) { // If that fails , try with the system classloader . try { return initProvider ( className , ClassLoader . getSystemClassLoader ( ) ) ; } catch ( Exception e ) { Throwable t ; if ( e instanceof InvocationTargetException ) { t = ( ( InvocationTargetException ) e ) . getCause ( ) ; } else { t = e ; } // if ( debug ! = null ) { // debug . println ( " Error loading provider " + ProviderConfig . this ) ; // t . printStackTrace ( ) ; // provider indicates fatal error , pass through exception if ( t instanceof ProviderException ) { throw ( ProviderException ) t ; } // provider indicates that loading should not be retried if ( t instanceof UnsupportedOperationException ) { disableLoad ( ) ; } return null ; } } } } ) ;
public class BlobContainersInner { /** * Sets the ImmutabilityPolicy to Locked state . The only action allowed on a Locked policy is ExtendImmutabilityPolicy action . ETag in If - Match is required for this operation . * @ param resourceGroupName The name of the resource group within the user ' s subscription . The name is case insensitive . * @ param accountName The name of the storage account within the specified resource group . Storage account names must be between 3 and 24 characters in length and use numbers and lower - case letters only . * @ param containerName The name of the blob container within the specified storage account . Blob container names must be between 3 and 63 characters in length and use numbers , lower - case letters and dash ( - ) only . Every dash ( - ) character must be immediately preceded and followed by a letter or number . * @ param ifMatch The entity state ( ETag ) version of the immutability policy to update . A value of " * " can be used to apply the operation only if the immutability policy already exists . If omitted , this operation will always be applied . * @ throws IllegalArgumentException thrown if parameters fail the validation * @ return the observable to the ImmutabilityPolicyInner object */ public Observable < ServiceResponseWithHeaders < ImmutabilityPolicyInner , BlobContainersLockImmutabilityPolicyHeaders > > lockImmutabilityPolicyWithServiceResponseAsync ( String resourceGroupName , String accountName , String containerName , String ifMatch ) { } }
if ( resourceGroupName == null ) { throw new IllegalArgumentException ( "Parameter resourceGroupName is required and cannot be null." ) ; } if ( accountName == null ) { throw new IllegalArgumentException ( "Parameter accountName is required and cannot be null." ) ; } if ( containerName == null ) { throw new IllegalArgumentException ( "Parameter containerName is required and cannot be null." ) ; } if ( this . client . subscriptionId ( ) == null ) { throw new IllegalArgumentException ( "Parameter this.client.subscriptionId() is required and cannot be null." ) ; } if ( this . client . apiVersion ( ) == null ) { throw new IllegalArgumentException ( "Parameter this.client.apiVersion() is required and cannot be null." ) ; } if ( ifMatch == null ) { throw new IllegalArgumentException ( "Parameter ifMatch is required and cannot be null." ) ; } return service . lockImmutabilityPolicy ( resourceGroupName , accountName , containerName , this . client . subscriptionId ( ) , this . client . apiVersion ( ) , ifMatch , this . client . acceptLanguage ( ) , this . client . userAgent ( ) ) . flatMap ( new Func1 < Response < ResponseBody > , Observable < ServiceResponseWithHeaders < ImmutabilityPolicyInner , BlobContainersLockImmutabilityPolicyHeaders > > > ( ) { @ Override public Observable < ServiceResponseWithHeaders < ImmutabilityPolicyInner , BlobContainersLockImmutabilityPolicyHeaders > > call ( Response < ResponseBody > response ) { try { ServiceResponseWithHeaders < ImmutabilityPolicyInner , BlobContainersLockImmutabilityPolicyHeaders > clientResponse = lockImmutabilityPolicyDelegate ( response ) ; return Observable . just ( clientResponse ) ; } catch ( Throwable t ) { return Observable . error ( t ) ; } } } ) ;
public class BaseTraceFormatter { /** * The messages log always uses the same / enhanced format , and relies on already formatted * messages . This does the formatting needed to take a message suitable for console . log * and wrap it to fit into messages . log . * @ param genData * @ return Formatted string for messages . log */ public String messageLogFormat ( GenericData genData ) { } }
// This is a very light trace format , based on enhanced : StringBuilder sb = new StringBuilder ( 256 ) ; String name = null ; KeyValuePair [ ] pairs = genData . getPairs ( ) ; KeyValuePair kvp = null ; String message = null ; Long datetime = null ; String level = "" ; String loggerName = null ; String srcClassName = null ; String throwable = null ; for ( KeyValuePair p : pairs ) { if ( p != null && ! p . isList ( ) ) { kvp = p ; if ( kvp . getKey ( ) . equals ( LogFieldConstants . MESSAGE ) ) { message = kvp . getStringValue ( ) ; } else if ( kvp . getKey ( ) . equals ( LogFieldConstants . IBM_DATETIME ) ) { datetime = kvp . getLongValue ( ) ; } else if ( kvp . getKey ( ) . equals ( LogFieldConstants . SEVERITY ) ) { level = kvp . getStringValue ( ) ; } else if ( kvp . getKey ( ) . equals ( LogFieldConstants . MODULE ) ) { loggerName = kvp . getStringValue ( ) ; } else if ( kvp . getKey ( ) . equals ( LogFieldConstants . IBM_CLASSNAME ) ) { srcClassName = kvp . getStringValue ( ) ; } else if ( kvp . getKey ( ) . equals ( LogFieldConstants . THROWABLE ) ) { throwable = kvp . getStringValue ( ) ; } } } name = nonNullString ( loggerName , srcClassName ) ; sb . append ( '[' ) . append ( DateFormatHelper . formatTime ( datetime , useIsoDateFormat ) ) . append ( "] " ) ; sb . append ( DataFormatHelper . getThreadId ( ) ) . append ( ' ' ) ; formatFixedString ( sb , name , enhancedNameLength ) ; sb . append ( " " + level + " " ) ; // sym has built - in padding sb . append ( message ) ; if ( throwable != null ) { sb . append ( LoggingConstants . nl ) . append ( throwable ) ; } return sb . toString ( ) ;
public class HybridViterbi { /** * 构造并初始化网格 * @ param inst * 样本实例 * @ return 双链网格 */ private Node [ ] [ ] [ ] initialLattice ( Instance inst ) { } }
int [ ] [ ] [ ] data = ( int [ ] [ ] [ ] ) inst . getData ( ) ; length = inst . length ( ) ; Node [ ] [ ] [ ] lattice = new Node [ 2 ] [ length ] [ ] ; for ( int i = 0 ; i < length ; i ++ ) { lattice [ 0 ] [ i ] = new Node [ ysize [ 0 ] ] ; for ( int j = 0 ; j < ysize [ 0 ] ; j ++ ) { lattice [ 0 ] [ i ] [ j ] = new Node ( ysize [ 0 ] , ysize [ 1 ] ) ; } initialClique ( lattice [ 0 ] [ i ] , data [ 0 ] [ i ] , orders [ 0 ] , ysize [ 0 ] , ysize [ 1 ] ) ; lattice [ 1 ] [ i ] = new Node [ ysize [ 1 ] ] ; for ( int j = 0 ; j < ysize [ 1 ] ; j ++ ) { lattice [ 1 ] [ i ] [ j ] = new Node ( ysize [ 1 ] , ysize [ 0 ] ) ; } initialClique ( lattice [ 1 ] [ i ] , data [ 1 ] [ i ] , orders [ 1 ] , ysize [ 1 ] , ysize [ 0 ] ) ; } return lattice ;
public class ApiOvhEmailexchange { /** * Create new archive mailbox * REST : POST / email / exchange / { organizationName } / service / { exchangeService } / account / { primaryEmailAddress } / archive * @ param quota [ required ] Archive mailbox quota ( if not provided mailbox quota will be taken ) * @ param organizationName [ required ] The internal name of your exchange organization * @ param exchangeService [ required ] The internal name of your exchange service * @ param primaryEmailAddress [ required ] Default email for this mailbox */ public OvhTask organizationName_service_exchangeService_account_primaryEmailAddress_archive_POST ( String organizationName , String exchangeService , String primaryEmailAddress , Long quota ) throws IOException { } }
String qPath = "/email/exchange/{organizationName}/service/{exchangeService}/account/{primaryEmailAddress}/archive" ; StringBuilder sb = path ( qPath , organizationName , exchangeService , primaryEmailAddress ) ; HashMap < String , Object > o = new HashMap < String , Object > ( ) ; addBody ( o , "quota" , quota ) ; String resp = exec ( qPath , "POST" , sb . toString ( ) , o ) ; return convertTo ( resp , OvhTask . class ) ;
public class Reflector { /** * to get a visible Propety ( Field or Getter ) of a object * @ param obj Object to invoke * @ param prop property to call * @ return property value */ public static Object getProperty ( Object obj , String prop , Object defaultValue ) { } }
// first try field Field [ ] fields = getFieldsIgnoreCase ( obj . getClass ( ) , prop , null ) ; if ( ! ArrayUtil . isEmpty ( fields ) ) { try { return fields [ 0 ] . get ( obj ) ; } catch ( Throwable t ) { ExceptionUtil . rethrowIfNecessary ( t ) ; } } // then getter try { char first = prop . charAt ( 0 ) ; if ( first >= '0' && first <= '9' ) return defaultValue ; return getGetter ( obj . getClass ( ) , prop ) . invoke ( obj ) ; } catch ( Throwable e1 ) { ExceptionUtil . rethrowIfNecessary ( e1 ) ; return defaultValue ; }
public class BsonDecoder { /** * default visibility for unit test */ String decodeCString ( ByteBuf buffer ) throws IOException { } }
int length = buffer . bytesBefore ( BsonConstants . STRING_TERMINATION ) ; if ( length < 0 ) throw new IOException ( "string termination not found" ) ; String result = buffer . toString ( buffer . readerIndex ( ) , length , StandardCharsets . UTF_8 ) ; buffer . skipBytes ( length + 1 ) ; return result ;
public class CmsVisitEntryFilter { /** * Returns an extended filter with the starting date restriction . < p > * @ param from the starting date to filter * @ return an extended filter with the starting date restriction */ public CmsVisitEntryFilter filterFrom ( long from ) { } }
CmsVisitEntryFilter filter = ( CmsVisitEntryFilter ) clone ( ) ; filter . m_dateFrom = from ; return filter ;
public class FalseFriendRuleHandler { @ Override public void startElement ( String namespaceURI , String lName , String qName , Attributes attrs ) throws SAXException { } }
if ( qName . equals ( RULE ) ) { translations . clear ( ) ; id = attrs . getValue ( "id" ) ; if ( ! ( inRuleGroup && defaultOff ) ) { defaultOff = "off" . equals ( attrs . getValue ( "default" ) ) ; } if ( inRuleGroup && id == null ) { id = ruleGroupId ; } correctExamples = new ArrayList < > ( ) ; incorrectExamples = new ArrayList < > ( ) ; } else if ( qName . equals ( PATTERN ) ) { inPattern = true ; String languageStr = attrs . getValue ( "lang" ) ; if ( Languages . isLanguageSupported ( languageStr ) ) { language = Languages . getLanguageForShortCode ( languageStr ) ; } } else if ( qName . equals ( TOKEN ) ) { setToken ( attrs ) ; } else if ( qName . equals ( TRANSLATION ) ) { inTranslation = true ; String languageStr = attrs . getValue ( "lang" ) ; if ( Languages . isLanguageSupported ( languageStr ) ) { Language tmpLang = Languages . getLanguageForShortCode ( languageStr ) ; currentTranslationLanguage = tmpLang ; if ( tmpLang . equalsConsiderVariantsIfSpecified ( motherTongue ) ) { translationLanguage = tmpLang ; } } } else if ( qName . equals ( EXAMPLE ) ) { correctExample = new StringBuilder ( ) ; incorrectExample = new StringBuilder ( ) ; if ( attrs . getValue ( TYPE ) . equals ( "incorrect" ) ) { inIncorrectExample = true ; } else if ( attrs . getValue ( TYPE ) . equals ( "correct" ) ) { inCorrectExample = true ; } else if ( attrs . getValue ( TYPE ) . equals ( "triggers_error" ) ) { throw new RuntimeException ( "'triggers_error' is not supported for false friend XML" ) ; } } else if ( qName . equals ( MESSAGE ) ) { inMessage = true ; message = new StringBuilder ( ) ; } else if ( qName . equals ( RULEGROUP ) ) { ruleGroupId = attrs . getValue ( "id" ) ; inRuleGroup = true ; defaultOff = "off" . equals ( attrs . getValue ( DEFAULT ) ) ; }
public class M2tsSettingsMarshaller { /** * Marshall the given parameter object . */ public void marshall ( M2tsSettings m2tsSettings , ProtocolMarshaller protocolMarshaller ) { } }
if ( m2tsSettings == null ) { throw new SdkClientException ( "Invalid argument passed to marshall(...)" ) ; } try { protocolMarshaller . marshall ( m2tsSettings . getAbsentInputAudioBehavior ( ) , ABSENTINPUTAUDIOBEHAVIOR_BINDING ) ; protocolMarshaller . marshall ( m2tsSettings . getArib ( ) , ARIB_BINDING ) ; protocolMarshaller . marshall ( m2tsSettings . getAribCaptionsPid ( ) , ARIBCAPTIONSPID_BINDING ) ; protocolMarshaller . marshall ( m2tsSettings . getAribCaptionsPidControl ( ) , ARIBCAPTIONSPIDCONTROL_BINDING ) ; protocolMarshaller . marshall ( m2tsSettings . getAudioBufferModel ( ) , AUDIOBUFFERMODEL_BINDING ) ; protocolMarshaller . marshall ( m2tsSettings . getAudioFramesPerPes ( ) , AUDIOFRAMESPERPES_BINDING ) ; protocolMarshaller . marshall ( m2tsSettings . getAudioPids ( ) , AUDIOPIDS_BINDING ) ; protocolMarshaller . marshall ( m2tsSettings . getAudioStreamType ( ) , AUDIOSTREAMTYPE_BINDING ) ; protocolMarshaller . marshall ( m2tsSettings . getBitrate ( ) , BITRATE_BINDING ) ; protocolMarshaller . marshall ( m2tsSettings . getBufferModel ( ) , BUFFERMODEL_BINDING ) ; protocolMarshaller . marshall ( m2tsSettings . getCcDescriptor ( ) , CCDESCRIPTOR_BINDING ) ; protocolMarshaller . marshall ( m2tsSettings . getDvbNitSettings ( ) , DVBNITSETTINGS_BINDING ) ; protocolMarshaller . marshall ( m2tsSettings . getDvbSdtSettings ( ) , DVBSDTSETTINGS_BINDING ) ; protocolMarshaller . marshall ( m2tsSettings . getDvbSubPids ( ) , DVBSUBPIDS_BINDING ) ; protocolMarshaller . marshall ( m2tsSettings . getDvbTdtSettings ( ) , DVBTDTSETTINGS_BINDING ) ; protocolMarshaller . marshall ( m2tsSettings . getDvbTeletextPid ( ) , DVBTELETEXTPID_BINDING ) ; protocolMarshaller . marshall ( m2tsSettings . getEbif ( ) , EBIF_BINDING ) ; protocolMarshaller . marshall ( m2tsSettings . getEbpAudioInterval ( ) , EBPAUDIOINTERVAL_BINDING ) ; protocolMarshaller . marshall ( m2tsSettings . getEbpLookaheadMs ( ) , EBPLOOKAHEADMS_BINDING ) ; protocolMarshaller . marshall ( m2tsSettings . 
getEbpPlacement ( ) , EBPPLACEMENT_BINDING ) ; protocolMarshaller . marshall ( m2tsSettings . getEcmPid ( ) , ECMPID_BINDING ) ; protocolMarshaller . marshall ( m2tsSettings . getEsRateInPes ( ) , ESRATEINPES_BINDING ) ; protocolMarshaller . marshall ( m2tsSettings . getEtvPlatformPid ( ) , ETVPLATFORMPID_BINDING ) ; protocolMarshaller . marshall ( m2tsSettings . getEtvSignalPid ( ) , ETVSIGNALPID_BINDING ) ; protocolMarshaller . marshall ( m2tsSettings . getFragmentTime ( ) , FRAGMENTTIME_BINDING ) ; protocolMarshaller . marshall ( m2tsSettings . getKlv ( ) , KLV_BINDING ) ; protocolMarshaller . marshall ( m2tsSettings . getKlvDataPids ( ) , KLVDATAPIDS_BINDING ) ; protocolMarshaller . marshall ( m2tsSettings . getNullPacketBitrate ( ) , NULLPACKETBITRATE_BINDING ) ; protocolMarshaller . marshall ( m2tsSettings . getPatInterval ( ) , PATINTERVAL_BINDING ) ; protocolMarshaller . marshall ( m2tsSettings . getPcrControl ( ) , PCRCONTROL_BINDING ) ; protocolMarshaller . marshall ( m2tsSettings . getPcrPeriod ( ) , PCRPERIOD_BINDING ) ; protocolMarshaller . marshall ( m2tsSettings . getPcrPid ( ) , PCRPID_BINDING ) ; protocolMarshaller . marshall ( m2tsSettings . getPmtInterval ( ) , PMTINTERVAL_BINDING ) ; protocolMarshaller . marshall ( m2tsSettings . getPmtPid ( ) , PMTPID_BINDING ) ; protocolMarshaller . marshall ( m2tsSettings . getProgramNum ( ) , PROGRAMNUM_BINDING ) ; protocolMarshaller . marshall ( m2tsSettings . getRateMode ( ) , RATEMODE_BINDING ) ; protocolMarshaller . marshall ( m2tsSettings . getScte27Pids ( ) , SCTE27PIDS_BINDING ) ; protocolMarshaller . marshall ( m2tsSettings . getScte35Control ( ) , SCTE35CONTROL_BINDING ) ; protocolMarshaller . marshall ( m2tsSettings . getScte35Pid ( ) , SCTE35PID_BINDING ) ; protocolMarshaller . marshall ( m2tsSettings . getSegmentationMarkers ( ) , SEGMENTATIONMARKERS_BINDING ) ; protocolMarshaller . marshall ( m2tsSettings . getSegmentationStyle ( ) , SEGMENTATIONSTYLE_BINDING ) ; protocolMarshaller . 
marshall ( m2tsSettings . getSegmentationTime ( ) , SEGMENTATIONTIME_BINDING ) ; protocolMarshaller . marshall ( m2tsSettings . getTimedMetadataBehavior ( ) , TIMEDMETADATABEHAVIOR_BINDING ) ; protocolMarshaller . marshall ( m2tsSettings . getTimedMetadataPid ( ) , TIMEDMETADATAPID_BINDING ) ; protocolMarshaller . marshall ( m2tsSettings . getTransportStreamId ( ) , TRANSPORTSTREAMID_BINDING ) ; protocolMarshaller . marshall ( m2tsSettings . getVideoPid ( ) , VIDEOPID_BINDING ) ; } catch ( Exception e ) { throw new SdkClientException ( "Unable to marshall request to JSON: " + e . getMessage ( ) , e ) ; }
public class FileUtils { /** * Read a text file into a String , optionally limiting the length . * @ param file to read ( will not seek , so things like / proc files are OK ) * @ param max length ( positive for head , negative of tail , 0 for no limit ) * @ param ellipsis to add of the file was truncated ( can be null ) * @ return the contents of the file , possibly truncated * @ throws IOException if something goes wrong reading the file */ public static String readTextFile ( File file , int max , String ellipsis ) throws IOException { } }
InputStream input = new FileInputStream ( file ) ; try { if ( max > 0 ) { // " head " mode : read the first N bytes byte [ ] data = new byte [ max + 1 ] ; int length = input . read ( data ) ; if ( length <= 0 ) return "" ; if ( length <= max ) return new String ( data , 0 , length ) ; if ( ellipsis == null ) return new String ( data , 0 , max ) ; return new String ( data , 0 , max ) + ellipsis ; } else if ( max < 0 ) { // " tail " mode : read it all , keep the last N int len ; boolean rolled = false ; byte [ ] last = null , data = null ; do { if ( last != null ) rolled = true ; byte [ ] tmp = last ; last = data ; data = tmp ; if ( data == null ) data = new byte [ - max ] ; len = input . read ( data ) ; } while ( len == data . length ) ; if ( last == null && len <= 0 ) return "" ; if ( last == null ) return new String ( data , 0 , len ) ; if ( len > 0 ) { rolled = true ; System . arraycopy ( last , len , last , 0 , last . length - len ) ; System . arraycopy ( data , 0 , last , last . length - len , len ) ; } if ( ellipsis == null || ! rolled ) return new String ( last ) ; return ellipsis + new String ( last ) ; } else { // " cat " mode : read it all ByteArrayOutputStream contents = new ByteArrayOutputStream ( ) ; int len ; byte [ ] data = new byte [ 1024 ] ; do { len = input . read ( data ) ; if ( len > 0 ) contents . write ( data , 0 , len ) ; } while ( len == data . length ) ; return contents . toString ( ) ; } } finally { input . close ( ) ; }
public class ArrayCoreMap { /** * Reduces memory consumption to the minimum for representing the values * currently stored stored in this object . */ public void compact ( ) { } }
if ( keys . length > size ) { Class < ? > [ ] newKeys = new Class < ? > [ size ] ; Object [ ] newVals = new Object [ size ] ; System . arraycopy ( keys , 0 , newKeys , 0 , size ) ; System . arraycopy ( values , 0 , newVals , 0 , size ) ; keys = newKeys ; values = newVals ; }
public class PdfPage { /** * Rotates the mediabox , but not the text in it . * @ returna < CODE > PdfRectangle < / CODE > */ PdfRectangle rotateMediaBox ( ) { } }
this . mediaBox = mediaBox . rotate ( ) ; put ( PdfName . MEDIABOX , this . mediaBox ) ; return this . mediaBox ;
public class ObjectFactory { /** * Create an instance of { @ link JAXBElement } { @ code < } { @ link DMSAngleType } { @ code > } * @ param value * Java instance representing xml element ' s value . * @ return * the new instance of { @ link JAXBElement } { @ code < } { @ link DMSAngleType } { @ code > } */ @ XmlElementDecl ( namespace = "http://www.opengis.net/gml" , name = "dmsAngleValue" ) public JAXBElement < DMSAngleType > createDmsAngleValue ( DMSAngleType value ) { } }
return new JAXBElement < DMSAngleType > ( _DmsAngleValue_QNAME , DMSAngleType . class , null , value ) ;
public class CliCommandBuilder { /** * Sets the commands to execute . * @ param commands the commands to execute * @ return the builder */ public CliCommandBuilder setCommands ( final Iterable < String > commands ) { } }
if ( commands == null ) { addCliArgument ( CliArgument . COMMANDS , null ) ; return this ; } final StringBuilder cmds = new StringBuilder ( ) ; for ( final Iterator < String > iterator = commands . iterator ( ) ; iterator . hasNext ( ) ; ) { cmds . append ( iterator . next ( ) ) ; if ( iterator . hasNext ( ) ) cmds . append ( ',' ) ; } addCliArgument ( CliArgument . COMMANDS , cmds . toString ( ) ) ; return this ;
public class CommonOps_DDRM { /** * Inserts into the specified elements of dst the source matrix . * < pre > * for i in len ( rows ) : * for j in len ( cols ) : * dst ( rows [ i ] , cols [ j ] ) = src ( i , j ) * < / pre > * @ param src Source matrix . Not modified . * @ param dst output matrix . Must be correct shape . * @ param rows array of row indexes * @ param rowsSize maximum element in row array * @ param cols array of column indexes * @ param colsSize maximum element in column array */ public static void insert ( DMatrixRMaj src , DMatrixRMaj dst , int rows [ ] , int rowsSize , int cols [ ] , int colsSize ) { } }
if ( rowsSize != src . numRows || colsSize != src . numCols ) throw new MatrixDimensionException ( "Unexpected number of rows and/or columns in dst matrix" ) ; int indexSrc = 0 ; for ( int i = 0 ; i < rowsSize ; i ++ ) { int indexDstRow = dst . numCols * rows [ i ] ; for ( int j = 0 ; j < colsSize ; j ++ ) { dst . data [ indexDstRow + cols [ j ] ] = src . data [ indexSrc ++ ] ; } }
public class FileCacheManager { /** * Reads the cache file . */ JsonNode readCacheFile ( ) { } }
if ( cacheFile == null || ! this . checkCacheLockFile ( ) ) { // no cache or the cache is not valid . return null ; } try { if ( ! cacheFile . exists ( ) ) { LOGGER . debug ( "Cache file doesn't exists. File: {}" , cacheFile ) ; return null ; } try ( Reader reader = new InputStreamReader ( new FileInputStream ( cacheFile ) , DEFAULT_FILE_ENCODING ) ) { return OBJECT_MAPPER . readTree ( reader ) ; } } catch ( IOException ex ) { LOGGER . debug ( "Failed to read the cache file. No worry. File: {}, Err: {}" , cacheFile , ex ) ; } return null ;
public class DwgUtil {
    /**
     * Reads a DWG "default double" (DD) value: a 2-bit flag followed by 0, 32,
     * 48 or 64 bits of data, interpreted relative to a default value.
     *
     * @param data   array of unsigned bytes obtained from the DWG binary file
     * @param offset the current bit offset where the value begins
     * @param defVal default double value used when only part (or none) of the
     *               8 bytes is present in the stream
     * @return a Vector whose first element is the new bit offset (Integer) and
     *         whose second element is the decoded value (Double)
     * @throws Exception if an unexpected bit value is found in the DWG file
     */
    public static Vector getDefaultDouble(int[] data, int offset, double defVal) throws Exception {
        // 2-bit flag selects how many raw bits follow: 0 = use default,
        // 1 = low 4 bytes patched, 2 = 6 bytes patched, 3 = full 8-byte double.
        int flags = ((Integer) getBits(data, 2, offset)).intValue();
        int read = 2;
        double val;
        if (flags == 0x0) {
            val = defVal;
        } else {
            int _offset = offset + 2;
            String dstr; // NOTE(review): declared but never used; kept for byte-compatibility
            if (flags == 0x3) {
                // Full 64-bit little-endian IEEE double follows.
                byte[] bytes = (byte[]) getBits(data, 64, _offset);
                ByteBuffer bb = ByteBuffer.wrap(bytes);
                bb.order(ByteOrder.LITTLE_ENDIAN);
                val = bb.getDouble();
                read = 66;
            } else {
                // Start from the default value's byte representation ...
                byte[] dstrArrayAux = new byte[8];
                int[] doubleOffset = new int[] { 0 };
                ByteUtils.doubleToBytes(defVal, dstrArrayAux, doubleOffset);
                // ... reverse it into little-endian byte order ...
                byte[] dstrArrayAuxx = new byte[8];
                dstrArrayAuxx[0] = dstrArrayAux[7];
                dstrArrayAuxx[1] = dstrArrayAux[6];
                dstrArrayAuxx[2] = dstrArrayAux[5];
                dstrArrayAuxx[3] = dstrArrayAux[4];
                dstrArrayAuxx[4] = dstrArrayAux[3];
                dstrArrayAuxx[5] = dstrArrayAux[2];
                dstrArrayAuxx[6] = dstrArrayAux[1];
                dstrArrayAuxx[7] = dstrArrayAux[0];
                // ... round-trip through unsigned ints back to bytes.
                int[] dstrArrayAuxxx = new int[8];
                for (int i = 0; i < dstrArrayAuxxx.length; i++) {
                    dstrArrayAuxxx[i] = ByteUtils.getUnsigned(dstrArrayAuxx[i]);
                }
                byte[] dstrArray = new byte[8];
                for (int i = 0; i < dstrArray.length; i++) {
                    dstrArray[i] = (byte) dstrArrayAuxxx[i];
                }
                if (flags == 0x1) {
                    // Patch the 4 low-order bytes with stream data.
                    byte[] ddArray = (byte[]) getBits(data, 32, _offset);
                    dstrArray[0] = ddArray[0];
                    dstrArray[1] = ddArray[1];
                    dstrArray[2] = ddArray[2];
                    dstrArray[3] = ddArray[3];
                    read = 34;
                } else {
                    // flags == 0x2: 6 stream bytes patch positions 4,5,0,1,2,3.
                    byte[] ddArray = (byte[]) getBits(data, 48, _offset);
                    dstrArray[4] = ddArray[0];
                    dstrArray[5] = ddArray[1];
                    dstrArray[0] = ddArray[2];
                    dstrArray[1] = ddArray[3];
                    dstrArray[2] = ddArray[4];
                    dstrArray[3] = ddArray[5];
                    read = 50;
                }
                ByteBuffer bb = ByteBuffer.wrap(dstrArray);
                bb.order(ByteOrder.LITTLE_ENDIAN);
                val = bb.getDouble();
            }
        }
        Vector v = new Vector();
        v.add(new Integer(offset + read));
        v.add(new Double(val));
        return v;
    }
}
public class ChaiProviderFactory { /** * Returns a thread - safe " wrapped " { @ code ChaiProvider } . All ldap operations will be forced through a single * lock and then sent to the backing provider . * Depending on the ldap server and the configured timeouts , calling methods on a synchronized * provider may result in significant blocking delays . * @ param theProvider The provider to be " wrapped " in a synchronized provider . * @ return A synchronized view of the specified provider */ private static ChaiProvider synchronizedProvider ( final ChaiProvider theProvider ) { } }
if ( theProvider instanceof SynchronizedProvider ) { return theProvider ; } else { return ( ChaiProvider ) Proxy . newProxyInstance ( theProvider . getClass ( ) . getClassLoader ( ) , theProvider . getClass ( ) . getInterfaces ( ) , new SynchronizedProvider ( theProvider ) ) ; }
public class S3StorageProvider { /** * Counts the number of items in a space up to the maxCount . If maxCount * is reached or exceeded , the returned string will indicate this with a * trailing ' + ' character ( e . g . 1000 + ) . * Note that anecdotal evidence shows that this method of counting * ( using size of chunked calls ) is faster in most cases than enumerating * the Iteration : StorageProviderUtil . count ( getSpaceContents ( spaceId , null ) ) */ protected String getSpaceCount ( String spaceId , int maxCount ) { } }
List < String > spaceContentChunk = null ; long count = 0 ; do { String marker = null ; if ( spaceContentChunk != null && spaceContentChunk . size ( ) > 0 ) { marker = spaceContentChunk . get ( spaceContentChunk . size ( ) - 1 ) ; } spaceContentChunk = getSpaceContentsChunked ( spaceId , null , MAX_ITEM_COUNT , marker ) ; count += spaceContentChunk . size ( ) ; } while ( spaceContentChunk . size ( ) > 0 && count < maxCount ) ; String suffix = "" ; if ( count >= maxCount ) { suffix = "+" ; } return String . valueOf ( count ) + suffix ;
public class Days {
    /**
     * Obtains a {@code Days} consisting of the number of days between two
     * dates. The start date is included, but the end date is not; the result
     * can be negative if the end is before the start.
     *
     * @param startDateInclusive the start date, inclusive, not null
     * @param endDateExclusive   the end date, exclusive, not null
     * @return the number of days between the two dates, not null
     */
    public static Days between(Temporal startDateInclusive, Temporal endDateExclusive) {
        // toIntExact throws ArithmeticException if the day count overflows an int.
        return of(Math.toIntExact(DAYS.between(startDateInclusive, endDateExclusive)));
    }
}
public class DiskFileItem { /** * Returns the size of the file . * @ return The size of the file , in bytes . */ @ Nonnegative public long getSize ( ) { } }
if ( m_nSize >= 0 ) return m_nSize ; if ( m_aCachedContent != null ) return m_aCachedContent . length ; if ( m_aDFOS . isInMemory ( ) ) return m_aDFOS . getDataLength ( ) ; return m_aDFOS . getFile ( ) . length ( ) ;
public class OsgiPropertyUtils { /** * Retrieve the value of the specified property from framework / system properties . * Value is converted and returned as an int . * @ param propertyName Name of property * @ param defaultValue Default value to return if property is not set * @ return Property or default value as an int */ public static int getInteger ( String propertyName , int defaultValue ) { } }
String tmpObj = get ( propertyName ) ; if ( tmpObj != null ) { try { return Integer . parseInt ( tmpObj ) ; } catch ( NumberFormatException e ) { } } return defaultValue ;
public class TransactionInput { /** * Connects this input to the relevant output of the referenced transaction if it ' s in the given map . * Connecting means updating the internal pointers and spent flags . If the mode is to ABORT _ ON _ CONFLICT then * the spent output won ' t be changed , but the outpoint . fromTx pointer will still be updated . * @ param transactions Map of txhash to transaction . * @ param mode Whether to abort if there ' s a pre - existing connection or not . * @ return NO _ SUCH _ TX if the prevtx wasn ' t found , ALREADY _ SPENT if there was a conflict , SUCCESS if not . */ public ConnectionResult connect ( Map < Sha256Hash , Transaction > transactions , ConnectMode mode ) { } }
Transaction tx = transactions . get ( outpoint . getHash ( ) ) ; if ( tx == null ) { return TransactionInput . ConnectionResult . NO_SUCH_TX ; } return connect ( tx , mode ) ;
public class CmsListItemWidget {
    /**
     * Adds a widget to the button panel.
     *
     * @param w the widget to add
     */
    public void addButton(Widget w) {
        m_buttonPanel.add(w);
        if (CmsCoreProvider.get().isIe7()) {
            // IE7 workaround: give the panel an explicit width.
            // NOTE(review): 22 presumably is the per-button width in px — confirm.
            m_buttonPanel.getElement().getStyle().setWidth(m_buttonPanel.getWidgetCount() * 22, Unit.PX);
        }
    }
}
public class ImplicitObjectELResolver { /** * If the base object is < code > null < / code > , and the property matches * the name of a JSP implicit object , returns < code > true < / code > * to indicate that implicit objects cannot be overwritten . * < p > The < code > propertyResolved < / code > property of the * < code > ELContext < / code > object must be set to < code > true < / code > by * this resolver before returning if an implicit object is matched . If * this property is not < code > true < / code > after this method is called , * the caller should ignore the return value . < / p > * @ param context The context of this evaluation . * @ param base Only < code > null < / code > is handled by this resolver . * Other values will result in an immediate return . * @ param property The name of the implicit object . * @ return If the < code > propertyResolved < / code > property of * < code > ELContext < / code > was set to < code > true < / code > , then * < code > true < / code > ; otherwise undefined . * @ throws NullPointerException if context is < code > null < / code > . * @ throws ELException if an exception was thrown while performing * the property or variable resolution . The thrown exception * must be included as the cause property of this exception , if * available . */ public boolean isReadOnly ( ELContext context , Object base , Object property ) { } }
if ( context == null ) { throw new NullPointerException ( ) ; } if ( ( base == null ) && ( "pageContext" . equals ( property ) || "pageScope" . equals ( property ) ) || "requestScope" . equals ( property ) || "sessionScope" . equals ( property ) || "applicationScope" . equals ( property ) || "param" . equals ( property ) || "paramValues" . equals ( property ) || "header" . equals ( property ) || "headerValues" . equals ( property ) || "initParam" . equals ( property ) || "cookie" . equals ( property ) ) { context . setPropertyResolved ( true ) ; return true ; } return false ; // Doesn ' t matter
public class AbstractStreamMessage {
    /**
     * Drains all elements from the queue while shutting down the stream:
     * close events are forwarded to the subscriber, pending futures are
     * failed, remaining payload objects get their removal callback, and
     * every element is reference-count released regardless of type.
     */
    void cleanupQueue(SubscriptionImpl subscription, Queue<Object> queue) {
        final Throwable cause = ClosedPublisherException.get();
        for (;;) {
            final Object e = queue.poll();
            if (e == null) {
                break;
            }
            try {
                if (e instanceof CloseEvent) {
                    // Close events carry no payload; notify and move on.
                    notifySubscriberOfCloseEvent(subscription, (CloseEvent) e);
                    continue;
                }
                if (e instanceof CompletableFuture) {
                    ((CompletableFuture<?>) e).completeExceptionally(cause);
                }
                @SuppressWarnings("unchecked")
                final T obj = (T) e;
                onRemoval(obj);
            } finally {
                // Always release, even when the handlers above throw.
                ReferenceCountUtil.safeRelease(e);
            }
        }
    }
}
public class ZooKeeperHaServices { @ Override public void close ( ) throws Exception { } }
Throwable exception = null ; try { blobStoreService . close ( ) ; } catch ( Throwable t ) { exception = t ; } internalClose ( ) ; if ( exception != null ) { ExceptionUtils . rethrowException ( exception , "Could not properly close the ZooKeeperHaServices." ) ; }
public class EvaluateRetrievalPerformance {
    /**
     * Finds all objects whose label matches the query label and collects
     * their DBIDs into the output set (cleared first).
     *
     * @param posn      output set, cleared and then filled
     * @param lrelation label relation to scan
     * @param label     query object label
     */
    private void findMatches(ModifiableDBIDs posn, Relation<?> lrelation, Object label) {
        posn.clear();
        // Linear scan over all DBIDs; match() decides label equality.
        for (DBIDIter ri = lrelation.iterDBIDs(); ri.valid(); ri.advance()) {
            if (match(label, lrelation.get(ri))) {
                posn.add(ri);
            }
        }
    }
}
public class CustomUserRegistryWrapper {
    /**
     * {@inheritDoc}
     *
     * Delegates to the wrapped custom user registry, translating its legacy
     * exception types into the wrapper's own exception hierarchy.
     */
    @Override
    @FFDCIgnore(com.ibm.websphere.security.EntryNotFoundException.class)
    public String getUniqueGroupId(String groupSecurityName) throws EntryNotFoundException, RegistryException {
        try {
            return customUserRegistry.getUniqueGroupId(groupSecurityName);
        } catch (com.ibm.websphere.security.EntryNotFoundException e) {
            // Legacy not-found maps to the wrapper's EntryNotFoundException.
            throw new EntryNotFoundException(e.getMessage(), e);
        } catch (Exception e) {
            // Any other failure becomes a generic RegistryException.
            throw new RegistryException(e.getMessage(), e);
        }
    }
}
public class OperaDesktopDriver {
    /**
     * Executes an Opera action by forwarding it to the exec service.
     *
     * @param using           action name
     * @param data            data parameter
     * @param dataString      data string parameter
     * @param dataStringParam parameter to the data string
     */
    public void operaDesktopAction(String using, int data, String dataString, String dataStringParam) {
        getScopeServices().getExec().action(using, data, dataString, dataStringParam);
    }
}
public class Utils { /** * Concatenate and encode the given name / value pairs into a valid URI query string . * This method is the complement of { @ link # parseURIQuery ( String ) } . * @ param uriParams Unencoded name / value pairs . * @ return URI query in the form { name 1 } = { value 1 } { @ literal & } . . . { @ literal & } { name } = { value n } . */ public static String joinURIQuery ( Map < String , String > uriParams ) { } }
StringBuilder buffer = new StringBuilder ( ) ; for ( String name : uriParams . keySet ( ) ) { String value = uriParams . get ( name ) ; if ( buffer . length ( ) > 0 ) { buffer . append ( "&" ) ; } buffer . append ( Utils . urlEncode ( name ) ) ; if ( ! Utils . isEmpty ( value ) ) { buffer . append ( "=" ) ; buffer . append ( Utils . urlEncode ( value ) ) ; } } return buffer . toString ( ) ;
public class MessageStoreImpl {
    /**
     * Obtains a list of XIDs which are in-doubt, as strings.
     * Part of the MBean interface for resolving in-doubt transactions.
     *
     * @return the XIDs as an array of strings, or {@code null} when there is
     *         no XID manager or no in-doubt transactions
     */
    @Override
    public String[] listPreparedTransactions() {
        // NOTE(review): trace entry/exit labels say "getPreparedTransactions"
        // while the method is listPreparedTransactions — presumably historical;
        // confirm before changing, as trace tooling may match on the label.
        if (TraceComponent.isAnyTracingEnabled() && tc.isEntryEnabled())
            SibTr.entry(this, tc, "getPreparedTransactions");
        String[] col = null;
        if (_manager != null) {
            // Obtain array of in-doubt xids from the XidManager.
            Xid[] xids = null;
            xids = _manager.listRemoteInDoubts();
            if (xids != null) {
                // Get the string representation of each xid and collect them.
                col = new String[xids.length];
                for (int i = 0; i < xids.length; i++) {
                    col[i] = ((PersistentTranId) xids[i]).toString();
                    if (TraceComponent.isAnyTracingEnabled() && tc.isDebugEnabled())
                        SibTr.debug(this, tc, "xid " + col[i] + " in-doubt");
                }
            }
        }
        if (TraceComponent.isAnyTracingEnabled() && tc.isEntryEnabled())
            SibTr.exit(this, tc, "getPreparedTransactions");
        return col;
    }
}
public class TemplatesApi {
    /**
     * Gets PDF documents from a template. Retrieves one or more PDF documents
     * from the specified template; pass {@code combined} as the document ID to
     * retrieve all documents in the template as one PDF.
     *
     * @param accountId  the external account number (int) or account ID GUID (required)
     * @param templateId the ID of the template being accessed (required)
     * @param documentId the ID of the document being accessed (required)
     * @return the document bytes
     * @throws ApiException on API failure
     */
    public byte[] getDocument(String accountId, String templateId, String documentId) throws ApiException {
        // Convenience overload: delegates with no extra options.
        return getDocument(accountId, templateId, documentId, null);
    }
}
public class Threads { /** * sleep 等待 。 已捕捉并处理 InterruptedException 。 * @ param duration * 等待时间 * @ param unit * 时间单位 */ public static void sleep ( final long duration , final TimeUnit unit ) { } }
try { Thread . sleep ( unit . toMillis ( duration ) ) ; } catch ( InterruptedException e ) { LOG . trace ( "" , e ) ; Thread . currentThread ( ) . interrupt ( ) ; }
public class Request {
    /**
     * Returns a boolean parameter value. To be used when the parameter is
     * required or has a default value.
     *
     * @throws java.lang.IllegalArgumentException if the value is null or blank
     */
    public boolean mandatoryParamAsBoolean(String key) {
        // mandatoryParam() enforces presence; parseBoolean() validates syntax.
        String s = mandatoryParam(key);
        return parseBoolean(key, s);
    }
}
public class UploadServlet { /** * Parse the properties . * Override this to do extra stuff . */ public void parseProperties ( Properties properties , MultipartRequest multi ) { } }
Enumeration < ? > params = multi . getParameterNames ( ) ; while ( params . hasMoreElements ( ) ) { String name = ( String ) params . nextElement ( ) ; String value = multi . getParameter ( name ) ; if ( value != null ) properties . setProperty ( name , value ) ; if ( DEBUG ) System . out . println ( name + " = " + value ) ; }
public class Logger { /** * { @ inheritDoc } */ public final void warn ( String message , Throwable throwable ) { } }
if ( isWarnEnabled ( ) ) { out . print ( "[ maven embedder WARNING] " ) ; out . println ( message ) ; if ( null != throwable ) { throwable . printStackTrace ( out ) ; } }
public class RuntimeEnv {
    /**
     * Returns an instantiator for the specified {@code clazz}, picking a
     * strategy based on what the runtime supports: a resolvable no-arg
     * constructor, or one of the Android fallbacks.
     */
    public static <T> Instantiator<T> newInstantiator(Class<T> clazz) {
        final Constructor<T> constructor = getConstructor(clazz);
        if (constructor == null) {
            // non-sun jre: fall back to platform-specific instantiation.
            if (newInstanceFromObjectInputStream == null) {
                if (objectConstructorId == -1)
                    throw new RuntimeException("Could not resolve constructor for " + clazz);
                return new Android3Instantiator<T>(clazz);
            }
            return new Android2Instantiator<T>(clazz);
        }
        return new DefaultInstantiator<T>(constructor);
    }
}
public class Frame {
    /**
     * Sets the frame's name and source, fluent-style.
     *
     * @param name the frame name
     * @param src  the frame source
     * @return this frame, for call chaining
     */
    public Frame name(String name, String src) {
        this.name = name;
        this.src = src;
        return this;
    }
}
public class StateAssignmentOperation {
    /**
     * Verifies conditions regarding parallelism and maxParallelism that must
     * be met when restoring state; may override the vertex's max parallelism
     * when it was not explicitly configured.
     *
     * @param operatorState      state to restore
     * @param executionJobVertex task for which the state should be restored
     */
    private static void checkParallelismPreconditions(OperatorState operatorState, ExecutionJobVertex executionJobVertex) {
        // ----- max parallelism preconditions -----
        if (operatorState.getMaxParallelism() < executionJobVertex.getParallelism()) {
            throw new IllegalStateException("The state for task " + executionJobVertex.getJobVertexId() +
                " can not be restored. The maximum parallelism (" + operatorState.getMaxParallelism() +
                ") of the restored state is lower than the configured parallelism (" + executionJobVertex.getParallelism() +
                "). Please reduce the parallelism of the task to be lower or equal to the maximum parallelism.");
        }
        // check that the number of key groups have not changed or if we need to override it to satisfy the restored state
        if (operatorState.getMaxParallelism() != executionJobVertex.getMaxParallelism()) {
            if (!executionJobVertex.isMaxParallelismConfigured()) {
                // if the max parallelism was not explicitly specified by the user, we derive it from the state
                LOG.debug("Overriding maximum parallelism for JobVertex {} from {} to {}",
                    executionJobVertex.getJobVertexId(), executionJobVertex.getMaxParallelism(), operatorState.getMaxParallelism());
                executionJobVertex.setMaxParallelism(operatorState.getMaxParallelism());
            } else {
                // if the max parallelism was explicitly specified, we complain on mismatch
                throw new IllegalStateException("The maximum parallelism (" +
                    operatorState.getMaxParallelism() + ") with which the latest " +
                    "checkpoint of the execution job vertex " + executionJobVertex +
                    " has been taken and the current maximum parallelism (" +
                    executionJobVertex.getMaxParallelism() + ") changed. This " +
                    "is currently not supported.");
            }
        }
    }
}
public class SubWriterHolderWriter {
    /**
     * Adds the summary link comment for a documented member.
     *
     * @param mw                the writer for the member being documented (unused here;
     *                          kept for subclass overrides)
     * @param member            the member being documented
     * @param firstSentenceTags the first sentence tags for the member
     * @param tdSummary         the content tree to which the comment is added
     */
    public void addSummaryLinkComment(AbstractMemberWriter mw, Element member, List<? extends DocTree> firstSentenceTags, Content tdSummary) {
        addIndexComment(member, firstSentenceTags, tdSummary);
    }
}
public class IfcStructuralLoadGroupImpl {
    /**
     * <!-- begin-user-doc -->
     * Returns the (resolved) SourceOfResultGroup reference list.
     * NOTE(review): EMF-generated accessor — regenerate rather than hand-edit.
     * <!-- end-user-doc -->
     * @generated
     */
    @SuppressWarnings("unchecked")
    @Override
    public EList<IfcStructuralResultGroup> getSourceOfResultGroup() {
        // true = resolve proxies when fetching the feature.
        return (EList<IfcStructuralResultGroup>) eGet(Ifc4Package.Literals.IFC_STRUCTURAL_LOAD_GROUP__SOURCE_OF_RESULT_GROUP, true);
    }
}
public class STSAssumeRoleSessionCredentialsProvider { /** * Starts a new session by sending a request to the AWS Security Token Service ( STS ) to assume a * Role using the long lived AWS credentials . This class then vends the short lived session * credentials for the assumed Role sent back from STS . */ private SessionCredentialsHolder newSession ( ) { } }
AssumeRoleRequest assumeRoleRequest = new AssumeRoleRequest ( ) . withRoleArn ( roleArn ) . withDurationSeconds ( roleSessionDurationSeconds ) . withRoleSessionName ( roleSessionName ) . withPolicy ( scopeDownPolicy ) ; if ( roleExternalId != null ) { assumeRoleRequest = assumeRoleRequest . withExternalId ( roleExternalId ) ; } AssumeRoleResult assumeRoleResult = securityTokenService . assumeRole ( assumeRoleRequest ) ; return new SessionCredentialsHolder ( assumeRoleResult . getCredentials ( ) ) ;
public class XmlEscapeSymbols { /** * These two methods ( two versions : for String and for char [ ] ) compare each of the candidate * text fragments with an CER coming from the SORTED _ CERS array , during binary search operations . */ private static int compare ( final char [ ] cer , final String text , final int start , final int end ) { } }
final int textLen = end - start ; final int maxCommon = Math . min ( cer . length , textLen ) ; int i ; // char 0 is discarded , will be & in both cases for ( i = 1 ; i < maxCommon ; i ++ ) { final char tc = text . charAt ( start + i ) ; if ( cer [ i ] < tc ) { return - 1 ; } else if ( cer [ i ] > tc ) { return 1 ; } } if ( cer . length > i ) { return 1 ; } if ( textLen > i ) { return - 1 ; } return 0 ;
public class BasicRandomRoutingTable { /** * Determine the next TrustGraphNodeId in a route containing * a given neighbor as the prior node . The next hop is the * TrustGraphNodeId paired with the given neighbor in the table . * @ param priorNeighbor the prior node on the route * @ return the next TrustGraphNodeId to route a message to * or null if the next hop cannot be determined . * @ see RandomRoutingTable . getNextHop ( TrustGraphNodeId ) */ @ Override public TrustGraphNodeId getNextHop ( final TrustGraphNodeId priorNeighbor ) { } }
if ( priorNeighbor != null ) { return routingTable . get ( priorNeighbor ) ; } else { return null ; }
public class EvaluatorSupport {
    /**
     * Called when a tag's context should be checked. This default
     * implementation is a no-op: subclasses override it (or one of the
     * evaluate variants this base class dispatches to) to contribute a
     * result.
     *
     * @return always {@code null} in this base implementation
     * @throws TemplateException never thrown here; declared for overrides
     */
    @Override
    public TagLib execute(Config config, Tag tag, TagLibTag libTag, FunctionLib[] flibs, Data data) throws TemplateException {
        return null;
    }
}
public class ConnectionDAODefaultImpl {
    /**
     * Pings the connection, retrying once on failure when transparent
     * reconnection is enabled; otherwise the first failure is rethrown.
     *
     * @return the ping result from {@code doPing}
     * @throws DevFailed when the ping fails and no retries remain
     */
    public long ping(final Connection connection) throws DevFailed {
        long result = 0;
        // With transparent reconnection allow exactly one retry.
        final int maxRetries = connection.transparent_reconnection ? 1 : 0;
        int nbRetries = 0;
        boolean retry;
        do {
            try {
                result = doPing(connection);
                retry = false;
            } catch (final DevFailed e) {
                if (nbRetries < maxRetries) {
                    retry = true;
                } else {
                    throw e;
                }
                nbRetries++;
            }
        } while (retry);
        return result;
    }
}
public class Default {
    /**
     * Handles an HTTP PUT: a path ending in "/" creates directories; any
     * other path writes the request body into the resource. Responds 201 on
     * creation, 200 on overwrite, 403 on failure.
     */
    public void handlePut(HttpServletRequest request, HttpServletResponse response, String pathInContext, Resource resource) throws ServletException, IOException {
        boolean exists = resource != null && resource.exists();
        // Conditional headers (If-Match etc.) may short-circuit the PUT.
        if (exists && !passConditionalHeaders(request, response, resource))
            return;
        if (pathInContext.endsWith("/")) {
            if (!exists) {
                // NOTE(review): resource may be null here (exists is false when
                // resource == null), which would NPE on getFile() — confirm callers.
                if (!resource.getFile().mkdirs())
                    response.sendError(HttpResponse.__403_Forbidden, "Directories could not be created");
                else {
                    response.setStatus(HttpResponse.__201_Created);
                    response.flushBuffer();
                }
            } else {
                response.setStatus(HttpResponse.__200_OK);
                response.flushBuffer();
            }
        } else {
            try {
                // Copy exactly Content-Length bytes when known, else to EOF.
                int toRead = request.getContentLength();
                InputStream in = request.getInputStream();
                OutputStream out = resource.getOutputStream();
                if (toRead >= 0)
                    IO.copy(in, out, toRead);
                else
                    IO.copy(in, out);
                out.close();
                response.setStatus(exists ? HttpResponse.__200_OK : HttpResponse.__201_Created);
                response.flushBuffer();
            } catch (Exception ex) {
                log.warn(LogSupport.EXCEPTION, ex);
                response.sendError(HttpResponse.__403_Forbidden, ex.getMessage());
            }
        }
    }
}
public class CmsEditProjectDialog { /** * Sets the name of the project . < p > * @ param name the name to set */ public void setName ( String name ) { } }
String oufqn = getOufqn ( ) ; if ( oufqn != null ) { if ( ! oufqn . endsWith ( "/" ) ) { oufqn += "/" ; } } else { oufqn = "/" ; } m_project . setName ( oufqn + name ) ;
public class IPBondLearningDescriptor {
    /**
     * Calculates the ionization potential of a bond. Atom-type perception and
     * lone-pair saturation temporarily modify atom properties, so the
     * affected properties of both bond atoms are cached up front and
     * restored before returning.
     *
     * @param bond          the bond to evaluate
     * @param atomContainer the container holding the bond
     * @return the ionization potential wrapped in a DescriptorValue
     */
    @Override
    public DescriptorValue calculate(IBond bond, IAtomContainer atomContainer) {
        double value = 0;
        // FIXME: for now I'll cache a few modified atomic properties, and restore them at the end of this method
        String originalAtomtypeName1 = bond.getBegin().getAtomTypeName();
        Integer originalNeighborCount1 = bond.getBegin().getFormalNeighbourCount();
        IAtomType.Hybridization originalHybridization1 = bond.getBegin().getHybridization();
        Integer originalValency1 = bond.getBegin().getValency();
        String originalAtomtypeName2 = bond.getEnd().getAtomTypeName();
        Integer originalNeighborCount2 = bond.getEnd().getFormalNeighbourCount();
        IAtomType.Hybridization originalHybridization2 = bond.getEnd().getHybridization();
        Integer originalValency2 = bond.getEnd().getValency();
        Double originalBondOrderSum1 = bond.getBegin().getBondOrderSum();
        Order originalMaxBondOrder1 = bond.getBegin().getMaxBondOrder();
        Double originalBondOrderSum2 = bond.getEnd().getBondOrderSum();
        Order originalMaxBondOrder2 = bond.getEnd().getMaxBondOrder();
        if (!isCachedAtomContainer(atomContainer)) {
            // Prepare the container once: perceive types, saturate lone pairs.
            try {
                AtomContainerManipulator.percieveAtomTypesAndConfigureAtoms(atomContainer);
                LonePairElectronChecker lpcheck = new LonePairElectronChecker();
                lpcheck.saturate(atomContainer);
            } catch (CDKException e) {
                return getDummyDescriptorValue(e);
            }
        }
        // Only non-single bonds get a predicted IP; single bonds keep value 0.
        if (!bond.getOrder().equals(IBond.Order.SINGLE)) {
            try {
                value = IonizationPotentialTool.predictIP(atomContainer, bond);
            } catch (CDKException e) {
                return getDummyDescriptorValue(e);
            }
        }
        // Restore all cached properties on both atoms.
        bond.getBegin().setAtomTypeName(originalAtomtypeName1);
        bond.getBegin().setHybridization(originalHybridization1);
        bond.getBegin().setValency(originalValency1);
        bond.getBegin().setFormalNeighbourCount(originalNeighborCount1);
        bond.getEnd().setAtomTypeName(originalAtomtypeName2);
        bond.getEnd().setHybridization(originalHybridization2);
        bond.getEnd().setValency(originalValency2);
        bond.getEnd().setFormalNeighbourCount(originalNeighborCount2);
        bond.getBegin().setMaxBondOrder(originalMaxBondOrder1);
        bond.getBegin().setBondOrderSum(originalBondOrderSum1);
        bond.getEnd().setMaxBondOrder(originalMaxBondOrder2);
        bond.getEnd().setBondOrderSum(originalBondOrderSum2);
        return new DescriptorValue(getSpecification(), getParameterNames(), getParameters(), new DoubleResult(value), DESCRIPTOR_NAMES);
    }
}
public class AFactoryAppBeans { /** * < p > Get SrvI18n in lazy mode . < / p > * @ return SrvI18n - SrvI18n * @ throws Exception - an exception */ public final SrvI18n lazyGetSrvI18n ( ) throws Exception { } }
String beanName = getSrvI18nName ( ) ; SrvI18n srvI18n = ( SrvI18n ) this . beansMap . get ( beanName ) ; if ( srvI18n == null ) { srvI18n = new SrvI18n ( ) ; srvI18n . setLogger ( lazyGetLogger ( ) ) ; srvI18n . initDefault ( ) ; this . beansMap . put ( beanName , srvI18n ) ; lazyGetLogger ( ) . info ( null , AFactoryAppBeans . class , beanName + " has been created." ) ; } return srvI18n ;
public class LinuxTaskController { /** * Enables the task for cleanup by changing permissions of the specified path * in the local filesystem */ @ Override void enableTaskForCleanup ( PathDeletionContext context ) throws IOException { } }
if ( LOG . isDebugEnabled ( ) ) { LOG . debug ( "Going to do " + TaskCommands . ENABLE_TASK_FOR_CLEANUP . toString ( ) + " for " + context . fullPath ) ; } if ( context instanceof TaskControllerPathDeletionContext ) { TaskControllerPathDeletionContext tContext = ( TaskControllerPathDeletionContext ) context ; if ( tContext . task . getUser ( ) != null && tContext . fs instanceof LocalFileSystem ) { try { runCommand ( TaskCommands . ENABLE_TASK_FOR_CLEANUP , tContext . task . getUser ( ) , buildTaskCleanupArgs ( tContext ) , null , null ) ; } catch ( IOException e ) { LOG . warn ( "Uanble to change permissions for " + tContext . fullPath ) ; } } else { throw new IllegalArgumentException ( "Either user is null or the " + "file system is not local file system." ) ; } } else { throw new IllegalArgumentException ( "PathDeletionContext provided is not " + "TaskControllerPathDeletionContext." ) ; }
public class Tree { /** * Sets the image name for a spacer image used to align the other images inside the tree . ( Defaults to " spacer . gif " ) . * @ param spacerImage the image name ( including extension ) * @ jsptagref . attributedescription Sets the image name for a spacer image used to align the other images inside the tree . * ( Defaults to " spacer . gif " ) . * @ jsptagref . databindable false * @ jsptagref . attributesyntaxvalue < i > string _ spacerImage < / i > * @ netui : attribute required = " false " rtexprvalue = " true " * description = " Sets the image name for a spacer image used to align the other images inside the tree . " */ public void setSpacerImage ( String spacerImage ) { } }
String val = setNonEmptyValueAttribute ( spacerImage ) ; if ( val != null ) _iState . setImageSpacer ( setNonEmptyValueAttribute ( val ) ) ;
public class VBSFaxClientSpi { /** * This function formats the provided object to enable embedding * in VBS code . * @ param object * The object to format * @ return The formatted object */ protected Object formatObject ( Object object ) { } }
// Prepares a value for embedding inside generated VBS source.
// - null        -> "" (the script never sees a literal "null")
// - String      -> control characters and both quote styles stripped so the
//                  value cannot break out of a VBS string literal
// - File        -> canonical path with every backslash doubled for VBS escaping
// - anything else is passed through untouched.
Object formattedObject = object;
if (object == null) {
    formattedObject = "";
} else if (object instanceof String) {
    String string = (String) object;
    // Use literal replace() rather than regex replaceAll(): none of these
    // tokens are patterns, and replace() avoids compiling a regex per call.
    string = string.replace("\n", "")
                   .replace("\r", "")
                   .replace("\t", "")
                   .replace("\f", "")
                   .replace("\b", "")
                   .replace("'", "")
                   .replace("\"", "");
    formattedObject = string;
} else if (object instanceof File) {
    File file = (File) object;
    String filePath = null;
    try {
        filePath = file.getCanonicalPath();
    } catch (IOException exception) {
        throw new FaxException("Unable to get file path.", exception);
    }
    // Double every backslash so Windows paths survive VBS string escaping.
    filePath = filePath.replace("\\", "\\\\");
    formattedObject = filePath;
}
return formattedObject;
public class ZonalQuery { /** * ~ Methoden - - - - - */ @ Override public V apply ( Moment context ) { } }
// Resolve the offset to apply: an explicitly configured offset wins,
// otherwise ask the timezone for the offset valid at this moment. Special
// case: querying SECOND_OF_MINUTE on a leap second under a full-minute offset
// (zero fractional part and a whole multiple of 60 seconds) must yield 60,
// which a converted PlainTimestamp cannot represent; every other query is
// answered via the local timestamp obtained from (moment, offset).
ZonalOffset shift = ( ( this . offset == null ) ? this . tz . getOffset ( context ) : this . offset ) ; if ( ( this . element == PlainTime . SECOND_OF_MINUTE ) && context . isLeapSecond ( ) && ( shift . getFractionalAmount ( ) == 0 ) && ( ( shift . getAbsoluteSeconds ( ) % 60 ) == 0 ) ) { return this . element . getType ( ) . cast ( Integer . valueOf ( 60 ) ) ; } return PlainTimestamp . from ( context , shift ) . get ( this . element ) ;
public class Descriptor { /** * Look out for a typical error a plugin developer makes . * See http : / / hudson . 361315 . n4 . nabble . com / Help - Hint - needed - Post - build - action - doesn - t - stay - activated - td2308833 . html */ private T verifyNewInstance ( T t ) { } }
// Sanity-check a freshly created instance: its getDescriptor() should point
// back to this very Descriptor. A mismatch usually means a misplaced
// @Extension on the plugin side, so warn loudly but still return the instance.
if (t == null || t.getDescriptor() == this) {
    return t;
}
// TODO: should this be a fatal error?
LOGGER.warning("Father of " + t + " and its getDescriptor() points to two different instances. Probably malplaced @Extension. See http://hudson.361315.n4.nabble.com/Help-Hint-needed-Post-build-action-doesn-t-stay-activated-td2308833.html");
return t;
public class LoginProcessor { /** * Checks if the request URL matches the { @ code loginUrl } and the HTTP method matches the { @ code loginMethod } . If * it does , it proceeds to login the user using the username / password specified in the parameters . * @ param context the context which holds the current request and response * @ param processorChain the processor chain , used to call the next processor */ public void processRequest ( RequestContext context , RequestSecurityProcessorChain processorChain ) throws Exception { } }
// Handles only requests matching the configured login URL/method; everything
// else is delegated down the processor chain. For a login request: resolve
// the candidate tenants (failing fast if none), extract username/password
// (null-safe, defaulting to ""), attempt authentication against all tenants,
// enable or disable remember-me according to the request flag, and route to
// the success callback. Only AuthenticationException is translated into the
// failure callback; any other exception propagates to the caller.
HttpServletRequest request = context . getRequest ( ) ; if ( isLoginRequest ( request ) ) { logger . debug ( "Processing login request" ) ; String [ ] tenants = tenantsResolver . getTenants ( ) ; if ( ArrayUtils . isEmpty ( tenants ) ) { throw new IllegalArgumentException ( "No tenants resolved for authentication" ) ; } String username = getUsername ( request ) ; String password = getPassword ( request ) ; if ( username == null ) { username = "" ; } if ( password == null ) { password = "" ; } try { logger . debug ( "Attempting authentication of user '{}' with tenants {}" , username , tenants ) ; Authentication auth = authenticationManager . authenticateUser ( tenants , username , password ) ; if ( getRememberMe ( request ) ) { rememberMeManager . enableRememberMe ( auth , context ) ; } else { rememberMeManager . disableRememberMe ( context ) ; } onLoginSuccess ( context , auth ) ; } catch ( AuthenticationException e ) { onLoginFailure ( context , e ) ; } } else { processorChain . processRequest ( context ) ; }
public class AtsdMeta { /** * Prepare URL to retrieve metrics * @ param metricMasks filter specified in ` tables ` connection string parameter * @ param tableFilter filter specified in method parameter * @ param underscoreAsLiteral treat underscore as not a metacharacter * @ return MetricLocation */ @ Nonnull static Collection < MetricLocation > prepareGetMetricUrls ( List < String > metricMasks , String tableFilter , boolean underscoreAsLiteral ) { } }
// A concrete table filter (neither the retrieve-all wildcard nor empty)
// always wins and maps to a single ATSD pattern URL. Otherwise fall back to
// the metric masks from the connection string, or to an empty collection
// when no masks were configured either.
if (!WildcardsUtil.isRetrieveAllPattern(tableFilter) && !tableFilter.isEmpty()) {
    return Collections.singletonList(buildAtsdPatternUrl(tableFilter, underscoreAsLiteral));
}
if (metricMasks.isEmpty()) {
    return Collections.emptyList();
}
return buildPatternDisjunction(metricMasks, underscoreAsLiteral);
public class ByteBuffersIO { /** * Merge the byte arrays of this class into a single one and return it . */ public byte [ ] createSingleByteArray ( ) { } }
// Allocate one destination buffer of the accumulated total size and copy each
// (array, offset, length) triple into it back-to-back.
byte[] merged = new byte[totalSize];
int writePos = 0;
for (Triple<byte[], Integer, Integer> chunk : buffers) {
    int length = chunk.getValue3().intValue();
    System.arraycopy(chunk.getValue1(), chunk.getValue2().intValue(), merged, writePos, length);
    writePos += length;
}
return merged;
public class AnnotatedValueResolver { /** * Returns an array of arguments which are resolved by each { @ link AnnotatedValueResolver } of the * specified { @ code resolvers } . */ static Object [ ] toArguments ( List < AnnotatedValueResolver > resolvers , ResolverContext resolverContext ) { } }
requireNonNull ( resolvers , "resolvers" ) ; requireNonNull ( resolverContext , "resolverContext" ) ; if ( resolvers . isEmpty ( ) ) { return emptyArguments ; } return resolvers . stream ( ) . map ( resolver -> resolver . resolve ( resolverContext ) ) . toArray ( ) ;
public class CXFEndpointProvider { /** * Extracts the bindingId from a Server . * @ param server * @ return */ private static String getBindingId ( Server server ) { } }
// Navigate Server -> Endpoint -> Binding -> BindingInfo to read the binding id.
final BindingInfo bindingInfo = server.getEndpoint().getBinding().getBindingInfo();
return bindingInfo.getBindingId();
public class ApiOvhTelephony { /** * Delete the given scheduler event * REST : DELETE / telephony / { billingAccount } / scheduler / { serviceName } / events / { uid } * @ param billingAccount [ required ] The name of your billingAccount * @ param serviceName [ required ] * @ param uid [ required ] The unique ICS event identifier */ public void billingAccount_scheduler_serviceName_events_uid_DELETE ( String billingAccount , String serviceName , String uid ) throws IOException { } }
// Substitute the three path parameters into the endpoint template and issue
// the DELETE with no request body.
final String qPath = "/telephony/{billingAccount}/scheduler/{serviceName}/events/{uid}";
exec(qPath, "DELETE", path(qPath, billingAccount, serviceName, uid).toString(), null);
public class JPATxEmInvocation { /** * ( non - Javadoc ) * @ see javax . persistence . EntityManager # createNamedQuery ( java . lang . String ) */ @ Override public Query createNamedQuery ( String name ) { } }
// Delegate to the wrapped EntityManager. When no JTA transaction is active,
// clear() afterwards detaches all managed entities so state does not leak
// across transaction-less invocations; the finally block guarantees the
// clear happens even if createNamedQuery throws.
try { return ivEm . createNamedQuery ( name ) ; } finally { if ( ! inJTATransaction ( ) ) { ivEm . clear ( ) ; } }
public class DBInstance { /** * Provides List of DB security group elements containing only < code > DBSecurityGroup . Name < / code > and * < code > DBSecurityGroup . Status < / code > subelements . * < b > NOTE : < / b > This method appends the values to the existing list ( if any ) . Use * { @ link # setDBSecurityGroups ( java . util . Collection ) } or { @ link # withDBSecurityGroups ( java . util . Collection ) } if you * want to override the existing values . * @ param dBSecurityGroups * Provides List of DB security group elements containing only < code > DBSecurityGroup . Name < / code > and * < code > DBSecurityGroup . Status < / code > subelements . * @ return Returns a reference to this object so that method calls can be chained together . */ public DBInstance withDBSecurityGroups ( DBSecurityGroupMembership ... dBSecurityGroups ) { } }
// Lazily create the backing list sized for the varargs, append every
// membership in order, and return this instance for call chaining.
if (this.dBSecurityGroups == null) {
    setDBSecurityGroups(new com.amazonaws.internal.SdkInternalList<DBSecurityGroupMembership>(dBSecurityGroups.length));
}
for (int i = 0; i < dBSecurityGroups.length; i++) {
    this.dBSecurityGroups.add(dBSecurityGroups[i]);
}
return this;
public class TherianContext { /** * Convenience method to perform an operation , discarding its result , and report whether it succeeded . * @ param operation * @ param hints * @ return whether { @ code operation } was supported and successful * @ throws NullPointerException on { @ code null } input * @ throws OperationException potentially , via { @ link Operation # getResult ( ) } */ public final synchronized boolean evalSuccess ( Operation < ? > operation , Hint ... hints ) { } }
// If the frame stack is empty, push a synthetic ROOT frame directly
// (bypassing push()) so the operation cache survives "around" the paired
// supports()/eval() calls, and pop it in finally regardless of outcome.
// Returns true only when the operation is both supported and, after
// evaluation, reports success; an unsupported operation returns false
// without being evaluated.
final boolean dummyRoot = stack . isEmpty ( ) ; if ( dummyRoot ) { // add a root frame to preserve our cache " around " the supports / eval lifecycle , bypassing # push ( ) : stack . push ( Frame . ROOT ) ; } try { if ( supports ( operation , hints ) ) { eval ( operation , hints ) ; return operation . isSuccessful ( ) ; } } finally { if ( dummyRoot ) { pop ( Frame . ROOT ) ; } } return false ;
public class RythmEngine { /** * Evaluate a script and return executing result . Note the API is not mature yet * don ' t use it in your application * @ param script * @ return the result */ public Object eval ( String script ) { } }
// / / use Java ' s ScriptEngine at the moment // ScriptEngineManager manager = new ScriptEngineManager ( ) ; // ScriptEngine jsEngine = manager . getEngineByName ( " JavaScript " ) ; // try { // return jsEngine . eval ( script ) ; // } catch ( ScriptException e ) { // throw new RuntimeException ( e ) ; return eval ( script , Collections . < String , Object > emptyMap ( ) ) ;
public class Vector4d { /** * Set this { @ link Vector4d } to the values of the given < code > v < / code > . * @ param v * the vector whose values will be copied into this * @ return this */ public Vector4d set ( Vector4ic v ) { } }
// Copy all four components from the read-only view v by delegating to the
// component-wise set(x, y, z, w) overload; returns this for chaining.
return set ( v . x ( ) , v . y ( ) , v . z ( ) , v . w ( ) ) ;
public class TreeModelUtils { /** * Debug method to print tree node structure * @ param rootNode * @ param tab */ public static void printNodeData ( Node rootNode , String tab ) { } }
// Recursively walks the tree, growing the indentation prefix by three spaces
// per level (a null tab is treated as the root level, "").
// NOTE(review): despite the "print tree node structure" javadoc, nothing is
// actually printed here — only the recursion remains. A debug print statement
// may have been removed; confirm intent before relying on this method.
tab = tab == null ? "" : tab + "   " ; for ( Node n : rootNode . getChilds ( ) ) { printNodeData ( n , tab ) ; }
public class LambdaDslObject { /** * Attribute that is an array of values with a minimum and maximum size that are not objects where each item must * match the following example * @ param name field name * @ param minSize minimum size of the array * @ param maxSize maximum size of the array * @ param value Value to use to match each item * @ param numberExamples number of examples to generate */ public LambdaDslObject minMaxArrayLike ( String name , Integer minSize , Integer maxSize , PactDslJsonRootValue value , int numberExamples ) { } }
// Delegate to the underlying DSL body builder, then return this
// LambdaDslObject so lambda-DSL calls can be chained fluently.
object . minMaxArrayLike ( name , minSize , maxSize , value , numberExamples ) ; return this ;
public class StringUtils { /** * < p > Checks if any of the CharSequences are empty ( " " ) or null . < / p > * < pre > * StringUtils . isAnyEmpty ( null ) = true * StringUtils . isAnyEmpty ( null , " foo " ) = true * StringUtils . isAnyEmpty ( " " , " bar " ) = true * StringUtils . isAnyEmpty ( " bob " , " " ) = true * StringUtils . isAnyEmpty ( " bob " , null ) = true * StringUtils . isAnyEmpty ( " " , " bar " ) = false * StringUtils . isAnyEmpty ( " foo " , " bar " ) = false * StringUtils . isAnyEmpty ( new String [ ] { } ) = false * StringUtils . isAnyEmpty ( new String [ ] { " " } ) = true * < / pre > * @ param css the CharSequences to check , may be null or empty * @ return { @ code true } if any of the CharSequences are empty or null * @ since 3.2 */ public static boolean isAnyEmpty ( final CharSequence ... css ) { } }
// A null or zero-length varargs array contains nothing empty, so it reports
// false; otherwise the first null/empty element short-circuits to true.
if (ArrayUtils.isEmpty(css)) {
    return false;
}
for (int i = 0; i < css.length; i++) {
    if (isEmpty(css[i])) {
        return true;
    }
}
return false;
public class DistCp { /** * < p > It contains two steps : < p > * < p > step 1 . change the src file list into the src chunk file list * src file list is a list of ( LongWritable , FilePair ) . LongWritable is * the length of the file while FilePair contains info of src and dst . * In order to copy file by chunks , we want to convert src file list into * src chunk file list , which is a list of ( LongWritable , FileChunkPair ) * Files are chopped into chunks based on follow rule : * Given the targeSize , which can be calculated based on the num of splits * and tells how many bytes each mapper roughly wants to copy , we try * to fill the targeSize by file blocks . * A chunk will be generated either when * i ) we meet the targetSize , this mapper is full . or * ii ) it ' s already the end of the file * For example , the targeSize is 2000 , and we have two files with * size 980 ( fileA ) and 3200 ( fileB ) respectively , both of them has * block size 512 . For fileA , we have 2 blocks : one of size 512, * the other of size 468 . 512 + 468 < 2000 , so we put both * of them in the 1st mapper , which means fileA only has 1 chunk file . * We still have 2000 - 980 = 1020 bytes left for the 1st mapper , * so we will keep feed blocks from fileB to this mapper until * it exceed the target size . In this case , we can still feed 2 more * blocks from fileB to the 1st mapper . And those 2 blocks * will be the 1st chunk of fileB ( with offset = 0 , length = 1024 ) . * For the rest of fileB , we will create another split for 2nd mapper , * and they will be the 2nd chunk of fileB ( with offset = 1024, * length = 2176 ) < p > * < p > step 2 . generate splits from src chunk file list * go through the src chunk file list , and generate FileSplit by providing * the starting pos and splitsize < p > * < p > At the same time , another list called dst chunk file dir list is * generated . 
< p > * < p > When the - copybychunk flag is turned on , for each file , we will create * a directory in the dst file system first . For example , for fileA , we * will create a directry named fileA _ chunkfiles , which contains all the * file chunks with name 0 , 1 , 2 . . dst chunk file dir list contains the * original file name , and how many chunks each dir contains . We will * use this list when stitching file chunks together after all the * mappers are done . < p > */ static private void createFileChunkList ( JobConf job , Path jobDirectory , FileSystem jobfs , ArrayList < FilePairComparable > filePairList , ArrayList < LongWritable > fileLengthList , long targetSize , boolean skipUnderConstructionFile ) throws IOException { } }
// Phase 1 of copy-by-chunk planning (see the javadoc above). Writes three
// sequence files under jobDirectory:
//   _distcp_src_files          — (length, FileChunkPair) rows, one per chunk;
//   _distcp_file_splits        — (start, size) byte ranges into the chunk
//                                list, each range becoming one mapper split,
//                                cut whenever `acc` would exceed targetSize;
//   _distcp_dst_chunk_files_dir — (chunk count, dst path) per regular file,
//                                consumed later when stitching chunks back.
// Under-construction source files are skipped when requested; the chunking
// block size comes from the source file when BLOCK_SIZE preservation is on,
// otherwise from the destination FS default. Writers are periodically
// sync()'d (SYNC_FILE_MAX / BYTES_PER_MAP thresholds) and always closed in
// the finally block. Body below is unchanged.
boolean preserve_status = job . getBoolean ( Options . PRESERVE_STATUS . propertyname , false ) ; EnumSet < FileAttribute > preserved = null ; boolean preserve_block_size = false ; if ( preserve_status ) { preserved = FileAttribute . parse ( job . get ( PRESERVE_STATUS_LABEL ) ) ; if ( preserved . contains ( FileAttribute . BLOCK_SIZE ) ) { preserve_block_size = true ; } } Path destPath = new Path ( job . get ( DST_DIR_LABEL , "/" ) ) ; FileSystem destFileSys ; try { destFileSys = destPath . getFileSystem ( job ) ; } catch ( IOException ex ) { throw new RuntimeException ( "Unable to get the named file system." , ex ) ; } // create src chunk file list for splits Path srcFileListPath = new Path ( jobDirectory , "_distcp_src_files" ) ; job . set ( SRC_LIST_LABEL , srcFileListPath . toString ( ) ) ; SequenceFile . Writer src_file_writer = SequenceFile . createWriter ( jobfs , job , srcFileListPath , LongWritable . class , FileChunkPair . class , jobfs . getConf ( ) . getInt ( "io.file.buffer.size" , 4096 ) , SRC_FILES_LIST_REPL_DEFAULT , jobfs . getDefaultBlockSize ( ) , SequenceFile . CompressionType . NONE , new DefaultCodec ( ) , null , new Metadata ( ) ) ; // store the file chunk information based on the target size long acc = 0L ; long pos = 0L ; long last = 0L ; long cbsyncs = 0L ; int cnsyncf = 0 ; int dstsyn = 0 ; // create the split files which can be directly read by getSplit ( ) Path splitPath = new Path ( jobDirectory , "_distcp_file_splits" ) ; job . set ( SPLIT_LIST_LABEL , splitPath . toString ( ) ) ; SequenceFile . Writer split_writer = SequenceFile . createWriter ( jobfs , job , splitPath , LongWritable . class , LongWritable . class , SequenceFile . CompressionType . NONE ) ; // for stitching chunk files Path dstChunkFileDirListPath = new Path ( jobDirectory , "_distcp_dst_chunk_files_dir" ) ; job . set ( DST_CHUNK_FILE_LIST_LABEL , dstChunkFileDirListPath . toString ( ) ) ; SequenceFile . Writer dst_chunk_file_writer = SequenceFile . 
createWriter ( jobfs , job , dstChunkFileDirListPath , IntWritable . class , Text . class , SequenceFile . CompressionType . NONE ) ; try { for ( int i = 0 ; i < filePairList . size ( ) ; ++ i ) { FilePairComparable fp = filePairList . get ( i ) ; // check if the source file is under construnction if ( ! fp . input . isDir ( ) && skipUnderConstructionFile ) { FileSystem srcFileSys = fp . input . getPath ( ) . getFileSystem ( job ) ; LOG . debug ( "Check file :" + fp . input . getPath ( ) ) ; LocatedBlocks locatedBlks = DFSUtil . convertToDFS ( srcFileSys ) . getLocatedBlocks ( fp . input . getPath ( ) , 0 , fp . input . getLen ( ) ) ; if ( locatedBlks . isUnderConstruction ( ) ) { LOG . debug ( "Skip under construnction file: " + fp . input . getPath ( ) ) ; continue ; } } long blockSize = destFileSys . getDefaultBlockSize ( ) ; if ( preserve_block_size ) { blockSize = fp . input . getBlockSize ( ) ; } long restFileLength = fileLengthList . get ( i ) . get ( ) ; long offset = 0 ; long chunkLength = 0 ; long lengthAddedToChunk = 0 ; int chunkIndex = 0 ; if ( restFileLength == 0 ) { src_file_writer . append ( new LongWritable ( 0 ) , new FileChunkPair ( fp . input , fp . output , 0 , 0 , 0 ) ) ; ++ chunkIndex ; } else { while ( restFileLength > 0 ) { if ( restFileLength > blockSize ) { lengthAddedToChunk = blockSize ; } else { lengthAddedToChunk = restFileLength ; } chunkLength += lengthAddedToChunk ; // reach the end of the target map size , write blocks in one chunk // then start a new split if ( acc + lengthAddedToChunk > targetSize ) { src_file_writer . append ( fileLengthList . get ( i ) , new FileChunkPair ( fp . input , fp . output , offset , chunkLength , chunkIndex ) ) ; pos = src_file_writer . getLength ( ) ; long splitsize = pos - last ; split_writer . append ( new LongWritable ( last ) , new LongWritable ( splitsize ) ) ; ++ cnsyncf ; cbsyncs += chunkLength ; if ( cnsyncf > SYNC_FILE_MAX || cbsyncs > BYTES_PER_MAP ) { src_file_writer . 
sync ( ) ; split_writer . sync ( ) ; cnsyncf = 0 ; cbsyncs = 0L ; } last = pos ; offset += chunkLength ; ++ chunkIndex ; chunkLength = 0L ; acc = 0L ; restFileLength -= lengthAddedToChunk ; } else { acc += lengthAddedToChunk ; restFileLength -= lengthAddedToChunk ; // reach the end of the file . if ( restFileLength == 0 ) { src_file_writer . append ( fileLengthList . get ( i ) , new FileChunkPair ( fp . input , fp . output , offset , chunkLength , chunkIndex ) ) ; LOG . info ( "create chunk, offest " + offset + " chunkLength " + chunkLength + " chunkIndex " + chunkIndex ) ; cbsyncs += chunkLength ; if ( cbsyncs > BYTES_PER_MAP ) { src_file_writer . sync ( ) ; cbsyncs = 0L ; } ++ chunkIndex ; } } } if ( ++ dstsyn > SYNC_FILE_MAX ) { dstsyn = 0 ; dst_chunk_file_writer . sync ( ) ; } } if ( ! fp . input . isDir ( ) ) { dst_chunk_file_writer . append ( new IntWritable ( chunkIndex ) , new Text ( fp . output ) ) ; } } // add the rest as the last split long cbrem = src_file_writer . getLength ( ) - last ; if ( cbrem != 0 ) { split_writer . append ( new LongWritable ( last ) , new LongWritable ( cbrem ) ) ; } } finally { checkAndClose ( src_file_writer ) ; checkAndClose ( dst_chunk_file_writer ) ; checkAndClose ( split_writer ) ; }
public class CalendarDay { /** * Get a new instance set to the specified day * @ param year new instance ' s year * @ param month new instance ' s month as defined by { @ linkplain java . util . Calendar } * @ param day new instance ' s day of month * @ return CalendarDay set to the specified date */ @ NonNull public static CalendarDay from ( int year , int month , int day ) { } }
// Simple factory: wraps the (year, month, day) triple in a new CalendarDay.
// Per the javadoc above, `month` follows java.util.Calendar's convention.
return new CalendarDay ( year , month , day ) ;
public class SLF4JBridgeHandler { /** * Returns true if SLF4JBridgeHandler has been previously installed , returns false otherwise . * @ return true if SLF4JBridgeHandler is already installed , false other wise * @ throws SecurityException */ public static boolean isInstalled ( ) throws SecurityException { } }
// Scan the JUL root logger's handlers for an already-registered bridge
// instance; absence means install() has not run (or was uninstalled).
for (Handler handler : getRootLogger().getHandlers()) {
    if (handler instanceof SLF4JBridgeHandler) {
        return true;
    }
}
return false;
public class CacheEntry { /** * Compare for FIFO * @ param other the other entry * @ return results */ private int compareToFIFO ( CacheEntry other ) { } }
// Orders FIFO cache entries by insertion order first, then timestamp, then
// read count; the first non-zero comparison wins.
int cmp = compareOrder(other);
if (cmp != 0) {
    return cmp;
}
cmp = compareTime(other);
if (cmp != 0) {
    return cmp;
}
// The original ended with "return cmp = compareReadCount(other)" — assigning
// to a local about to go out of scope was dead code; return directly.
return compareReadCount(other);
public class ComputationGraph { /** * Generate the output for all examples / batches in the input iterator , and concatenate them into a single array * per network output * @ param iterator Data to pass through the network * @ return output for all examples in the iterator */ public INDArray [ ] output ( MultiDataSetIterator iterator ) { } }
// Drain the iterator: run an inference pass (train=false) per MultiDataSet,
// threading the feature and label mask arrays through, collect each batch's
// per-output arrays, then merge them so the result has one concatenated
// INDArray per network output (DataSetUtil.mergeFeatures, masks ignored).
// NOTE(review): the iterator is not reset here — presumably callers position
// it at the start; confirm before relying on this for full-dataset output.
List < INDArray [ ] > outputs = new ArrayList < > ( ) ; while ( iterator . hasNext ( ) ) { MultiDataSet next = iterator . next ( ) ; INDArray [ ] out = output ( false , next . getFeatures ( ) , next . getFeaturesMaskArrays ( ) , next . getLabelsMaskArrays ( ) ) ; outputs . add ( out ) ; } INDArray [ ] [ ] arr = outputs . toArray ( new INDArray [ outputs . size ( ) ] [ 0 ] ) ; return DataSetUtil . mergeFeatures ( arr , null ) . getFirst ( ) ;
public class RunInstancesAction { /** * Launches the specified number of instances using an AMI ( Amazon Image ) for which you have permissions . * Notes : When you launch an instance , it enters the pending state . After the instance is ready for you , it enters * the running state . To check the state of your instance , call DescribeInstances . * To ensure faster instance launches , break up large requests into smaller batches . For example , create five * separate launch requests for 100 instances each instead of one launch request for 500 instances . * To tag your instance , ensure that it is running as CreateTags requires a resource ID . For more information * about tagging , see Tagging Your Amazon EC2 Resources . * If you don ' t specify a security group when launching an instance , Amazon EC2 uses the default security group . * For more information , see Security Groups in the Amazon Elastic Compute Cloud User Guide . * [ EC2 - VPC only accounts ] If you don ' t specify a subnet in the request , we choose a default subnet from your * default VPC for you . * [ EC2 - Classic accounts ] If you ' re launching into EC2 - Classic and you don ' t specify an Availability Zone , * we choose one for you . * Linux instances have access to the public key of the key pair at boot . You can use this key to provide * secure access to the instance . Amazon EC2 public images use this feature to provide secure access without * passwords . * For more information , see Key Pairs in the Amazon Elastic Compute Cloud User Guide . * You can provide optional user data when launching an instance . For more information , see Instance Metadata * in the Amazon Elastic Compute Cloud User Guide . * If any of the AMIs have a product code attached for which the user has not subscribed , RunInstances fails . * Some instance types can only be launched into a VPC . If you do not have a default VPC , or if you do not * specify a subnet ID in the request , RunInstances fails . 
For more information , see Instance Types Available * Only in a VPC . * For more information about troubleshooting , see What To Do If An Instance Immediately Terminates , and * Troubleshooting Connecting to Your Instance in the Amazon Elastic Compute Cloud User Guide . * @ param endpoint Optional - Endpoint to which request will be sent . * Default : " https : / / ec2 . amazonaws . com " * @ param identity ID of the secret access key associated with your Amazon AWS or IAM * account . * Example : " AKIAIOSFODNN7EXAMPLE " * @ param credential Secret access key associated with your Amazon AWS or IAM account . * Example : " wJalrXUtnFEMI / K7MDENG / bPxRfiCYEXAMPLEKEY " * @ param proxyHost Optional - proxy server used to connect to Amazon API . If empty no * proxy will be used . * Default : " " * @ param proxyPort Optional - proxy server port . You must either specify values for both * proxyHost and proxyPort inputs or leave them both empty . * Default : " " * @ param proxyUsername Optional - proxy server user name . * Default : " " * @ param proxyPassword Optional - proxy server password associated with the proxyUsername * input value . * @ param headers Optional - string containing the headers to use for the request separated * by new line ( CRLF ) . The header name - value pair will be separated by " : " . * Format : Conforming with HTTP standard for headers ( RFC 2616) * Examples : Accept : text / plain * Default : " " * @ param queryParams Optional - string containing query parameters that will be appended to * the URL . The names and the values must not be URL encoded because if * they are encoded then a double encoded will occur . The separator between * name - value pairs is " & " symbol . The query name will be separated from * query value by " = " . * Examples : " parameterName1 = parameterValue1 & parameterName2 = parameterValue2" * Default : " " * @ param version Optional - Version of the web service to made the call against it . 
* Example : " 2016-11-15" * Default : " 2016-11-15" * @ param delimiter Optional - delimiter that will be used . * Default : " , " * @ param availabilityZone Optional - availability zone of the instance ( as part of Placement ) . * Default : " " * @ param hostId Optional - ID of the dedicated host on which the instance resides * ( as part of Placement ) . This parameter is not support for the * ImportInstance command . * Default : " " * @ param imageId ID of the AMI , which you can get by calling DescribeImages . * For more information go to : http : / / docs . aws . amazon . com / AWSEC2 / latest / UserGuide / ComponentsAMIs . html * Example : " ami - abcdef12" * @ param instanceType Optional - Instance type . For more information , see Instance Types * in the Amazon Elastic Compute Cloud User Guide . * Valid values : t1 . micro | t2 . nano | t2 . micro | t2 . small | t2 . medium | * t2 . large | m1 . small | m1 . medium | m1 . large | m1 . xlarge | m3 . medium | * m3 . large | m3 . xlarge | m3.2xlarge | m4 . large | m4 . xlarge | m4.2xlarge | * m4.4xlarge | m4.10xlarge | m2 . xlarge | m2.2xlarge | m2.4xlarge | * cr1.8xlarge | r3 . large | r3 . xlarge | r3.2xlarge | r3.4xlarge | * r3.8xlarge | x1.4xlarge | x1.8xlarge | x1.16xlarge | x1.32xlarge | * i2 . xlarge | i2.2xlarge | i2.4xlarge | i2.8xlarge | hi1.4xlarge | * hs1.8xlarge | c1 . medium | c1 . xlarge | c3 . large | c3 . xlarge | * c3.2xlarge | c3.4xlarge | c3.8xlarge | c4 . large | c4 . xlarge | * c4.2xlarge | c4.4xlarge | c4.8xlarge | cc1.4xlarge | cc2.8xlarge | * g2.2xlarge | g2.8xlarge | cg1.4xlarge | d2 . xlarge | d2.2xlarge | * d2.4xlarge | d2.8xlarge " * Default : " m1 . small " * @ param kernelId Optional - ID of the kernel . * Important : We recommend that you use PV - GRUB instead of kernels * and RAM disks . For more information , see PV - GRUB in the Amazon * Elastic Compute Cloud User Guide . * Default : " " * @ param ramdiskId Optional - ID of the RAM disk . 
* Important : We recommend that you use PV - GRUB instead of kernels * and RAM disks . For more information , see PV - GRUB in the Amazon * Elastic Compute Cloud User Guide . * Default : " " * @ param subnetId Optional - String that contains one or more subnet IDs . If you * launch into EC2 Classic then supply values for this input and * don ' t supply values for Private IP Addresses string . * [ EC2 - VPC ] The ID of the subnet to launch the instance into . * Default : " " * @ param blockDeviceMappingDeviceNamesString Optional - String that contains one or more device names , exposed * to the instance , separated by delimiter . If you want to suppress * the specified device included in the block device mapping of the * AMI then supply " NoDevice " in string . * Examples : " / dev / sdc , / dev / sdd " , " / dev / sdh " , " xvdh " or " NoDevice " * Default : " " * @ param blockDeviceMappingVirtualNamesString Optional - String that contains one or more virtual names separated * by delimiter . Virtual device name is " ephemeralN " . Instance store * volumes are numbered starting from 0 . An instance type with 2 available * instance store volumes can specify mappings for ephemeral0 and ephemeral1. * The number of available instance store volumes depends on the instance * type . After you connect to the instance , you must mount the volume . * Constraints : For M3 instances , you must specify instance store volumes * in the block device mapping for the instance . When you launch an M3 * instance , we ignore any instance store volumes specified in the block * device mapping for the AMI . * Example : " ephemeral0 , ephemeral1 , Not relevant " * Default : " " * @ param deleteOnTerminationsString Optional - String that contains one or more values that indicates * whether a specific EBS volume will be deleted on instance termination . 
* Example : For a second EBS device ( from existing 4 devices ) , that * should be deleted , the string will be : " false , true , false , false " . * Valid values : " true " , " false " * Default : " " * @ param ebsOptimized Optional - Indicates whether the instance is optimized for EBS I / O . * This optimization provides dedicated throughput to Amazon EBS and an * optimized configuration stack to provide optimal EBS I / O performance . * This optimization isn ' t available with all instance types . Additional * usage charges apply when using an EBS - optimized instance . * Valid values : " true " , " false " * Default : " false " * @ param encryptedString Optional - String that contains one or more values that indicates * whether a specific EBS volume will be encrypted . Encrypted Amazon * EBS volumes may only be attached to instances that support Amazon * EBS encryption . * Example : For a second EBS device ( from existing 4 devices ) , that * should be encrypted , the string will be : " 0,1,0,0 " . If no value * provided the the default value of not encrypted will be considered * for all EBS specified devices . * Default : " " * @ param iopsString Optional - String that contains one or more values that specifies * the number of I / O operations per second ( IOPS ) that the volume supports . * For " io1 " , this represents the number of IOPS that are provisioned * for the volume . For " gp2 " , this represents the baseline performance * of the volume and the rate at which the volume accumulates I / O * credits for bursting . For more information about General Purpose * SSD baseline performance , I / O credits , and bursting , see Amazon * EBS Volume Types in the Amazon Elastic Compute Cloud User Guide . * Constraint : Range is 100-20000 IOPS for " io1 " volumes and 100-10000 * IOPS for " gp2 " volumes . 
* Condition : This parameter is required for requests to create " io1" * volumes ; it is not used in requests to create " gp2 " , " st1 " , " sc1 " , * or " standard " volumes . * Example : For a first EBS device ( from existing 3 devices ) , with * type " io1 " that should have 5000 IOPS as value the string will * be : " 5000 , , " . If no value provided then the default value for every * single EBS device will be used . * Default : " " * @ param snapshotIdsString Optional - String that contains one or more values of the snapshot * IDs to be used when creating the EBS device . * Example : For a last EBS device ( from existing 3 devices ) , to be * created using a snapshot as image the string will be : * " Not relevant , Not relevant , snap - abcdef12 " . If no value provided * then no snapshot will be used when creating EBS device . * Default : " " * @ param volumeSizesString Optional - String that contains one or more values of the sizes * ( in GiB ) for EBS devices . * Constraints : 1-16384 for General Purpose SSD ( " gp2 " ) , 4-16384 for * Provisioned IOPS SSD ( " io1 " ) , 500-16384 for Throughput Optimized * HDD ( " st1 " ) , 500-16384 for Cold HDD ( " sc1 " ) , and 1-1024 for Magnetic * ( " standard " ) volumes . If you specify a snapshot , the volume size * must be equal to or larger than the snapshot size . If you ' re creating * the volume from a snapshot and don ' t specify a volume size , the * default is the snapshot size . * Examples : " Not relevant , Not relevant , 100" * Default : " " * @ param volumeTypesString Optional - String that contains one or more values that specifies * the volume types : " gp2 " , " io1 " , " st1 " , " sc1 " , or " standard " . If * no value provided then the default value of " standard " for every * single EBS device type will be considered . * Default : " " * @ param privateIpAddress Optional - [ EC2 - VPC ] The primary IP address . You must specify a * value from the IP address range of the subnet . 
Only one private * IP address can be designated as primary . Therefore , you can ' t * specify this parameter if PrivateIpAddresses . n . Primary is set * to " true " and PrivateIpAddresses . n . PrivateIpAddress is set to * an IP address . * Default : We select an IP address from the IP address range of the * subnet . * @ param privateIpAddressesString Optional - String that contains one or more private IP addresses * to assign to the network interface . Only one private IP address * can be designated as primary . Use this if you want to launch instances * with many NICs attached . Separate the NICs privateIps with | * Example : " 10.0.0.1,20.0.0.1 | 30.0.0.1" * Default : " " * @ param iamInstanceProfileArn Optional - Amazon Resource Name ( IAM _ INSTANCE _ PROFILE _ ARN ) of the * instance profile . * Example : " arn : aws : iam : : 123456789012 : user / some _ user " * Default : " " * @ param iamInstanceProfileName Optional - Name of the instance profile . * Default : " " * @ param keyPairName Optional - Name of the key pair . You can create a key pair using * CreateKeyPair or ImportKeyPair . * Important : If you do not specify a key pair , you can ' t connect to * the instance unless you choose an AMI that is configured to allow * users another way to log in . * Default : " " * @ param securityGroupIdsString Optional - IDs of the security groups for the network interface . * Applies only if creating a network interface when launching an * instance . Separate the groupIds for each NIC with " | " * Example : " sg - 01234567 , sg - 7654321 | sg - abcdef01" * Default : " " * @ param securityGroupNamesString Optional - String that contains one or more IDs of the security * groups for the network interface . Applies only if creating a network * interface when launching an instance . * Default : " " * @ param affinity Optional - Affinity setting for the instance on the Dedicated Host * ( as part of Placement ) . 
This parameter is not supported for the * ImportInstance command . * Default : " " * @ param clientToken Optional - Unique , case - sensitive identifier you provide to ensure * the idem - potency of the request . For more information , see Ensuring * Idempotency . * Constraints : Maximum 64 ASCII characters * Default : " " * @ param disableApiTermination Optional - If you set this parameter to " true " , you can ' t terminate * the instance using the Amazon EC2 console , CLI , or API ; otherwise , * you can . If you set this parameter to " true " and then later want * to be able to terminate the instance , you must first change the * value of the disableApiTermination attribute to " false " using * ModifyInstanceAttribute . Alternatively , if you set InstanceInitiatedShutdownBehavior * to " terminate " , you can terminate the instance by running the shutdown * command from the instance . * Valid values : " true " , " false " * Default : " false " * @ param instanceInitiatedShutdownBehavior Optional - Indicates whether an instance stops or terminates when * you initiate shutdown from the instance ( using the operating system * command for system shutdown ) . * Valid values : " stop " , " terminate " * Default : " stop " * @ param maxCount Maximum number of instances to launch . If you specify more instances * than Amazon EC2 can launch in the target Availability Zone , Amazon * EC2 launches the largest possible number of instances above MinCount . * Constraints : Between 1 and the maximum number you ' re allowed for * the specified instance type . For more information about the default * limits , and how to request an increase , see : * https : / / aws . amazon . com / ec2 / faqs / # How _ many _ instances _ can _ I _ run _ in _ Amazon _ EC2 * Default : " 1" * @ param minCount Minimum number of instances to launch . If you specify a minimum * that is more instances than Amazon EC2 can launch in the target * Availability Zone , Amazon EC2 launches no instances . 
* Constraints : Between 1 and the maximum number you ' re allowed for * the specified instance type . For more information about the default * limits , and how to request an increase , see : * https : / / aws . amazon . com / ec2 / faqs / # How _ many _ instances _ can _ I _ run _ in _ Amazon _ EC2 * Default : " 1" * @ param monitoring Optional - whether to enable or not monitoring for the instance . * Default : " false " * @ param placementGroupName Optional - Name of the placement group for the instance ( as part * of Placement ) . * Default : " " * @ param tenancy Optional - Tenancy of an instance ( if the instance is running in * a VPC - as part of Placement ) . * An instance with a tenancy of dedicated runs on single - tenant hardware . * The host tenancy is not supported for the ImportInstance command . * Valid values : " dedicated " , " default " , " host " . * @ param userData Optional - The user data to make available to the instance . For * more information , see Running Commands on Your Linux Instance at * Launch ( Linux ) and Adding User Data ( Windows ) . If you are using * an AWS SDK or command line tool , Base64 - encoding is performed for * you , and you can load the text from a file . Otherwise , you must * provide Base64 - encoded text . * Default : " " * @ param networkInterfaceAssociatePublicIpAddress Optional - String that contains one or more values that indicates * whether to assign a public IP address or not when you launch in * a VPC . The public IP address can only be assigned to a network * interface for eth0 , and can only be assigned to a new network * interface , not an existing one . You cannot specify more than one * network interface in the request . If launching into a default subnet , * the default value is " true " . 
* Valid values : " true " , " false " * Default : " " * @ param networkInterfaceDeleteOnTerminationString Optional - String that contains one or more values that indicates * that the interface is deleted when the instance is terminated . * You can specify true only if creating a new network interface when * launching an instance . * Valid values : " true " , " false " * Default : " " * @ param networkInterfaceDescription Optional - String that contains one or more values that describe * the network interfaces . Applies only if creating a network interfaces * when launching an instance . * Default : " " * @ param networkInterfaceDeviceIndex Optional - String that contains one or more values that are indexes * of the device on the instance for the network interface attachment . * If you are specifying a network interface in a RunInstances request , * you should provide the device index . * Default : " " * @ param networkInterfaceId Optional - String that contains one or more values that are IDs * of the network interfaces . * Default : " " * @ param secondaryPrivateIpAddressCount Optional - The number of secondary private IP addresses . You can ' t * specify this option and specify more than one private IP address * using the private IP addresses option . Minimum valid number is 2. * Default : " " * @ return A map with strings as keys and strings as values that contains : outcome of the action ( or failure message * and the exception if there is one ) , returnCode of the operation and the ID of the request */ @ Action ( name = "Run Instances" , outputs = { } }
@ Output ( RETURN_CODE ) , @ Output ( RETURN_RESULT ) , @ Output ( INSTANCE_ID_RESULT ) , @ Output ( EXCEPTION ) } , responses = { @ Response ( text = SUCCESS , field = RETURN_CODE , value = ReturnCodes . SUCCESS , matchType = MatchType . COMPARE_EQUAL , responseType = ResponseType . RESOLVED ) , @ Response ( text = FAILURE , field = RETURN_CODE , value = ReturnCodes . FAILURE , matchType = MatchType . COMPARE_EQUAL , responseType = ResponseType . ERROR ) } ) public Map < String , String > execute ( @ Param ( value = ENDPOINT ) String endpoint , @ Param ( value = IDENTITY , required = true ) String identity , @ Param ( value = CREDENTIAL , required = true , encrypted = true ) String credential , @ Param ( value = PROXY_HOST ) String proxyHost , @ Param ( value = PROXY_PORT ) String proxyPort , @ Param ( value = PROXY_USERNAME ) String proxyUsername , @ Param ( value = PROXY_PASSWORD , encrypted = true ) String proxyPassword , @ Param ( value = HEADERS ) String headers , @ Param ( value = QUERY_PARAMS ) String queryParams , @ Param ( value = VERSION ) String version , @ Param ( value = DELIMITER ) String delimiter , @ Param ( value = AVAILABILITY_ZONE ) String availabilityZone , @ Param ( value = HOST_ID ) String hostId , @ Param ( value = IMAGE_ID , required = true ) String imageId , @ Param ( value = INSTANCE_TYPE ) String instanceType , @ Param ( value = KERNEL_ID ) String kernelId , @ Param ( value = RAMDISK_ID ) String ramdiskId , @ Param ( value = SUBNET_ID ) String subnetId , @ Param ( value = BLOCK_DEVICE_MAPPING_DEVICE_NAMES_STRING ) String blockDeviceMappingDeviceNamesString , @ Param ( value = BLOCK_DEVICE_MAPPING_VIRTUAL_NAMES_STRING ) String blockDeviceMappingVirtualNamesString , @ Param ( value = DELETE_ON_TERMINATIONS_STRING ) String deleteOnTerminationsString , @ Param ( value = EBS_OPTIMIZED ) String ebsOptimized , @ Param ( value = ENCRYPTED_STRING ) String encryptedString , @ Param ( value = IOPS_STRING ) String iopsString , @ Param ( value = 
SNAPSHOT_IDS_STRING ) String snapshotIdsString , @ Param ( value = VOLUME_SIZES_STRING ) String volumeSizesString , @ Param ( value = VOLUME_TYPES_STRING ) String volumeTypesString , @ Param ( value = PRIVATE_IP_ADDRESS ) String privateIpAddress , @ Param ( value = PRIVATE_IP_ADDRESSES_STRING ) String privateIpAddressesString , @ Param ( value = IAM_INSTANCE_PROFILE_ARN ) String iamInstanceProfileArn , @ Param ( value = IAM_INSTANCE_PROFILE_NAME ) String iamInstanceProfileName , @ Param ( value = KEY_PAIR_NAME ) String keyPairName , @ Param ( value = SECURITY_GROUP_IDS_STRING ) String securityGroupIdsString , @ Param ( value = SECURITY_GROUP_NAMES_STRING ) String securityGroupNamesString , @ Param ( value = AFFINITY ) String affinity , @ Param ( value = CLIENT_TOKEN ) String clientToken , @ Param ( value = LOWER_CASE_DISABLE_API_TERMINATION ) String disableApiTermination , @ Param ( value = LOWER_CASE_INSTANCE_INITIATED_SHUTDOWN_BEHAVIOR ) String instanceInitiatedShutdownBehavior , @ Param ( value = MAX_COUNT ) String maxCount , @ Param ( value = MIN_COUNT ) String minCount , @ Param ( value = MONITORING ) String monitoring , @ Param ( value = PLACEMENT_GROUP_NAME ) String placementGroupName , @ Param ( value = TENANCY ) String tenancy , @ Param ( value = LOWER_CASE_USER_DATA ) String userData , @ Param ( value = NETWORK_INTERFACE_ASSOCIATE_PUBLIC_IP_ADDRESS ) String networkInterfaceAssociatePublicIpAddress , @ Param ( value = NETWORK_INTERFACE_DELETE_ON_TERMINATION ) String networkInterfaceDeleteOnTerminationString , @ Param ( value = NETWORK_INTERFACE_DESCRIPTION ) String networkInterfaceDescription , @ Param ( value = NETWORK_INTERFACE_DEVICE_INDEX ) String networkInterfaceDeviceIndex , @ Param ( value = NETWORK_INTERFACE_ID ) String networkInterfaceId , @ Param ( value = SECONDARY_PRIVATE_IP_ADDRESS_COUNT ) String secondaryPrivateIpAddressCount ) { try { version = getDefaultStringInput ( version , INSTANCES_DEFAULT_API_VERSION ) ; instanceType = 
getDefaultStringInput ( instanceType , DEFAULT_INSTANCE_TYPE ) ; final CommonInputs commonInputs = new CommonInputs . Builder ( ) . withEndpoint ( endpoint , EC2_API , EMPTY ) . withIdentity ( identity ) . withCredential ( credential ) . withProxyHost ( proxyHost ) . withProxyPort ( proxyPort ) . withProxyUsername ( proxyUsername ) . withProxyPassword ( proxyPassword ) . withHeaders ( headers ) . withQueryParams ( queryParams ) . withVersion ( version ) . withDelimiter ( delimiter ) . withAction ( RUN_INSTANCES ) . withApiService ( EC2_API ) . withRequestUri ( EMPTY ) . withRequestPayload ( EMPTY ) . withHttpClientMethod ( HTTP_CLIENT_METHOD_GET ) . build ( ) ; final CustomInputs customInputs = new CustomInputs . Builder ( ) . withAvailabilityZone ( availabilityZone ) . withHostId ( hostId ) . withImageId ( imageId ) . withInstanceType ( instanceType ) . withKernelId ( kernelId ) . withRamdiskId ( ramdiskId ) . withSubnetId ( subnetId ) . build ( ) ; final EbsInputs ebsInputs = new EbsInputs . Builder ( ) . withBlockDeviceMappingDeviceNamesString ( blockDeviceMappingDeviceNamesString ) . withBlockDeviceMappingVirtualNamesString ( blockDeviceMappingVirtualNamesString ) . withDeleteOnTerminationsString ( deleteOnTerminationsString ) . withEbsOptimized ( ebsOptimized ) . withEncryptedString ( encryptedString ) . withIopsString ( iopsString ) . withSnapshotIdsString ( snapshotIdsString ) . withVolumeSizesString ( volumeSizesString ) . withVolumeTypesString ( volumeTypesString ) . build ( ) ; final ElasticIpInputs elasticIpInputs = new ElasticIpInputs . Builder ( ) . withPrivateIpAddress ( privateIpAddress ) . withPrivateIpAddressesString ( privateIpAddressesString ) . build ( ) ; final IamInputs iamInputs = new IamInputs . Builder ( ) . withIamInstanceProfileArn ( iamInstanceProfileArn ) . withIamInstanceProfileName ( iamInstanceProfileName ) . withKeyPairName ( keyPairName ) . withSecurityGroupIdsString ( securityGroupIdsString ) . 
withSecurityGroupNamesString ( securityGroupNamesString ) . build ( ) ; final InstanceInputs instanceInputs = new InstanceInputs . Builder ( ) . withAffinity ( affinity ) . withClientToken ( clientToken ) . withDisableApiTermination ( disableApiTermination ) . withInstanceInitiatedShutdownBehavior ( instanceInitiatedShutdownBehavior ) . withMaxCount ( maxCount ) . withMinCount ( minCount ) . withMonitoring ( monitoring ) . withPlacementGroupName ( placementGroupName ) . withTenancy ( tenancy ) . withUserData ( userData ) . build ( ) ; final NetworkInputs networkInputs = new NetworkInputs . Builder ( ) . withNetworkInterfacesAssociatePublicIpAddressesString ( networkInterfaceAssociatePublicIpAddress ) . withNetworkInterfaceDeleteOnTermination ( networkInterfaceDeleteOnTerminationString ) . withNetworkInterfaceDescription ( networkInterfaceDescription ) . withNetworkInterfaceDeviceIndex ( networkInterfaceDeviceIndex ) . withNetworkInterfaceId ( networkInterfaceId ) . withSecondaryPrivateIpAddressCount ( secondaryPrivateIpAddressCount ) . build ( ) ; Map < String , String > queryMapResult = new QueryApiExecutor ( ) . execute ( commonInputs , customInputs , ebsInputs , elasticIpInputs , iamInputs , instanceInputs , networkInputs ) ; if ( ( ReturnCodes . SUCCESS ) . equals ( queryMapResult . get ( RETURN_CODE ) ) ) { putResponseIn ( queryMapResult , INSTANCE_ID_RESULT , INSTANCE_ID_X_PATH_QUERY ) ; } return queryMapResult ; } catch ( Exception e ) { return ExceptionProcessor . getExceptionResult ( e ) ; }
public class SubClass { /** * Returns the constant map index to class * If entry doesn ' t exist it is created . * @ param type * @ return */ public final int resolveClassIndex ( TypeElement type ) { } }
int size = 0 ; int index = 0 ; constantReadLock . lock ( ) ; try { size = getConstantPoolSize ( ) ; index = getClassIndex ( type ) ; } finally { constantReadLock . unlock ( ) ; } if ( index == - 1 ) { String name = El . getInternalForm ( type ) ; int nameIndex = resolveNameIndex ( name ) ; index = addConstantInfo ( new Clazz ( nameIndex ) , size ) ; } addIndexedElement ( index , type ) ; return index ;
public class JdbcWriter { /** * Saves data . * @ param row Data structure representing a row as a Map of column _ name : column _ value * @ throws SQLException */ public void save ( Map < String , Object > row ) throws Exception { } }
Tuple2 < List < String > , String > data = sqlFromRow ( row ) ; PreparedStatement statement = conn . prepareStatement ( data . _2 ( ) ) ; int i = 1 ; for ( String columnName : data . _1 ( ) ) { statement . setObject ( i , row . get ( columnName ) ) ; i ++ ; } statement . executeUpdate ( ) ;
public class ClassLoaderUtil { /** * 加载外部类 * @ param jarOrDir jar文件或者包含jar和class文件的目录 * @ param name 类名 * @ return 类 * @ since 4.4.2 */ public static Class < ? > loadClass ( File jarOrDir , String name ) { } }
try { return getJarClassLoader ( jarOrDir ) . loadClass ( name ) ; } catch ( ClassNotFoundException e ) { throw new UtilException ( e ) ; }
public class EventDefinition { /** * Method to execute the esjp . * @ param _ parameter Parameter * @ return Return * @ throws EFapsException on error */ @ Override public Return execute ( final Parameter _parameter ) throws EFapsException { } }
Return ret = null ; _parameter . put ( ParameterValues . PROPERTIES , new HashMap < > ( super . evalProperties ( ) ) ) ; try { EventDefinition . LOG . debug ( "Invoking method '{}' for Resource '{}'" , this . methodName , this . resourceName ) ; final Class < ? > cls = Class . forName ( this . resourceName , true , EFapsClassLoader . getInstance ( ) ) ; final Method method = cls . getMethod ( this . methodName , new Class [ ] { Parameter . class } ) ; ret = ( Return ) method . invoke ( cls . newInstance ( ) , _parameter ) ; EventDefinition . LOG . debug ( "Terminated invokation of method '{}' for Resource '{}'" , this . methodName , this . resourceName ) ; } catch ( final SecurityException e ) { EventDefinition . LOG . error ( "security wrong: '{}'" , this . resourceName , e ) ; } catch ( final IllegalArgumentException e ) { EventDefinition . LOG . error ( "arguments invalid : '{}'- '{}'" , this . resourceName , this . methodName , e ) ; } catch ( final IllegalAccessException e ) { EventDefinition . LOG . error ( "could not access class: '{}'" , this . resourceName , e ) ; } catch ( final InvocationTargetException e ) { EventDefinition . LOG . error ( "could not invoke method: '{}' in class: '{}'" , this . methodName , this . resourceName , e ) ; throw ( EFapsException ) e . getCause ( ) ; } catch ( final ClassNotFoundException e ) { EventDefinition . LOG . error ( "class not found: '{}" + this . resourceName , e ) ; } catch ( final NoSuchMethodException e ) { EventDefinition . LOG . error ( "could not find method: '{}' in class '{}'" , new Object [ ] { this . methodName , this . resourceName , e } ) ; } catch ( final InstantiationException e ) { EventDefinition . LOG . error ( "could not instantiat Class: '{}'" , this . resourceName , e ) ; } return ret ;
public class Kute { /** * Finds the first resource in stream that matches given path . * @ param stream A stream of resources . * @ param path The path to search for . * @ param < R > The resource implementation . * @ return A matching resource , if found . */ public static < R extends Resource > Optional < R > findResource ( Stream < R > stream , String path ) { } }
return findFirstResource ( stream . filter ( r -> r . getPath ( ) . equals ( path ) ) ) ;
public class Result { /** * Create a Status from a previous status ' result / details * @ param value * @ param status * @ param details * @ return */ public static < R > Result < R > create ( R value , Result < ? > result ) { } }
return new Result < R > ( value , result . status , result . details , result . variables ) ;
public class JDBCCallableStatement { /** * # ifdef JAVA6 */ public synchronized void setNClob ( String parameterName , Reader reader ) throws SQLException { } }
super . setNClob ( findParameterIndex ( parameterName ) , reader ) ;
public class SchedulingHelper { /** * ( non - Javadoc ) * @ see java . util . concurrent . Future # get ( ) */ @ Override public V get ( ) throws InterruptedException , ExecutionException { } }
this . m_coordinationLatch . await ( ) ; if ( m_pendingException != null ) { throw m_pendingException ; } return m_defaultFuture . get ( ) ;
public class SpringPhysicalNamingStrategy { /** * Get an identifier for the specified details . By default this method will return an * identifier with the name adapted based on the result of * { @ link # isCaseInsensitive ( JdbcEnvironment ) } * @ param name the name of the identifier * @ param quoted if the identifier is quoted * @ param jdbcEnvironment the JDBC environment * @ return an identifier instance */ protected Identifier getIdentifier ( String name , boolean quoted , JdbcEnvironment jdbcEnvironment ) { } }
if ( isCaseInsensitive ( jdbcEnvironment ) ) { name = name . toLowerCase ( Locale . ROOT ) ; } return new Identifier ( name , quoted ) ;
public class TrifocalAlgebraicPoint7 { /** * Minimize the algebraic error using LM . The two epipoles are the parameters being optimized . */ private void minimizeWithGeometricConstraints ( ) { } }
extractEpipoles . setTensor ( solutionN ) ; extractEpipoles . extractEpipoles ( e2 , e3 ) ; // encode the parameters being optimized param [ 0 ] = e2 . x ; param [ 1 ] = e2 . y ; param [ 2 ] = e2 . z ; param [ 3 ] = e3 . x ; param [ 4 ] = e3 . y ; param [ 5 ] = e3 . z ; // adjust the error function for the current inputs errorFunction . init ( ) ; // set up the optimization algorithm optimizer . setFunction ( errorFunction , null ) ; optimizer . initialize ( param , gtol , ftol ) ; // optimize until convergence or the maximum number of iterations UtilOptimize . process ( optimizer , maxIterations ) ; // get the results and compute the trifocal tensor double found [ ] = optimizer . getParameters ( ) ; paramToEpipoles ( found , e2 , e3 ) ; enforce . process ( e2 , e3 , A ) ; enforce . extractSolution ( solutionN ) ;
public class OutbindRequestReceiver { /** * Notify that the outbind was accepted . * @ param outbind is the { @ link Outbind } command . * @ throws IllegalStateException if this method already called before . */ void notifyAcceptOutbind ( Outbind outbind ) throws IllegalStateException { } }
this . lock . lock ( ) ; try { if ( this . request == null ) { this . request = new OutbindRequest ( outbind ) ; this . requestCondition . signal ( ) ; } else { throw new IllegalStateException ( "Already waiting for acceptance outbind" ) ; } } finally { this . lock . unlock ( ) ; }
public class JLocaleChooser { /** * The ItemListener for the locales . */ public void itemStateChanged ( ItemEvent iEvt ) { } }
String item = ( String ) iEvt . getItem ( ) ; int i ; for ( i = 0 ; i < localeCount ; i ++ ) { if ( locales [ i ] . getDisplayName ( ) . equals ( item ) ) break ; } setLocale ( locales [ i ] , false ) ;