signature
stringlengths
43
39.1k
implementation
stringlengths
0
450k
public class ObrClassFinderService { /** * Bundle shutting down . */ public void stop ( BundleContext context ) throws Exception { } }
ClassServiceUtility . log ( context , LogService . LOG_INFO , "Stopping ObrClassFinderImpl" ) ; super . stop ( context ) ; repositoryAdmin = null ; waitingForRepositoryAdmin = false ; waitingForClassService = false ; ClassFinderActivator . setClassFinder ( null ) ;
public class SegmentMeanShiftSearchColor { /** * Uses mean - shift to find the peak . Returns the peak as an index in the image data array . * @ param meanColor The color value which mean - shift is trying to find a region which minimises it */ protected void findPeak ( float cx , float cy , float [ ] meanColor ) { } }
history . reset ( ) ; history . grow ( ) . set ( cx , cy ) ; for ( int i = 0 ; i < maxIterations ; i ++ ) { float total = 0 ; float sumX = 0 , sumY = 0 ; Arrays . fill ( sumColor , 0 ) ; int kernelIndex = 0 ; float x0 = cx - radiusX ; float y0 = cy - radiusY ; // If it is not near the image border it can use faster techniques if ( interpolate . isInFastBounds ( x0 , y0 ) && interpolate . isInFastBounds ( x0 + widthX - 1 , y0 + widthY - 1 ) ) { for ( int yy = 0 ; yy < widthY ; yy ++ ) { for ( int xx = 0 ; xx < widthX ; xx ++ ) { float ds = spacialTable [ kernelIndex ++ ] ; interpolate . get ( x0 + xx , y0 + yy , pixelColor ) ; float dc = distanceSq ( pixelColor , meanColor ) / maxColorDistanceSq ; float weight = dc > 1 ? 0 : weight ( ( ds + dc ) / 2f ) ; total += weight ; sumX += weight * ( xx + x0 ) ; sumY += weight * ( yy + y0 ) ; sumColor ( sumColor , pixelColor , weight ) ; } } } else { // Perform more sanity checks here for the image edge . Edge pixels are handled by skipping them for ( int yy = 0 ; yy < widthY ; yy ++ ) { float sampleY = y0 + yy ; // make sure it is inside the image if ( sampleY < 0 ) { kernelIndex += widthX ; continue ; } else if ( sampleY > image . height - 1 ) { break ; } for ( int xx = 0 ; xx < widthX ; xx ++ , kernelIndex ++ ) { float sampleX = x0 + xx ; // make sure it is inside the image if ( sampleX < 0 || sampleX > image . width - 1 ) { continue ; } float ds = spacialTable [ kernelIndex ] ; interpolate . get ( x0 + xx , y0 + yy , pixelColor ) ; float dc = distanceSq ( pixelColor , meanColor ) / maxColorDistanceSq ; float weight = dc > 1 ? 0 : weight ( ( ds + dc ) / 2f ) ; total += weight ; sumX += weight * ( xx + x0 ) ; sumY += weight * ( yy + y0 ) ; sumColor ( sumColor , pixelColor , weight ) ; } } } if ( total == 0 ) break ; float peakX = sumX / total ; float peakY = sumY / total ; if ( fast ) { history . grow ( ) . 
set ( peakX , peakY ) ; // see if it has already been here before int px = ( int ) ( peakX + 0.5f ) ; int py = ( int ) ( peakY + 0.5f ) ; int index = pixelToMode . getIndex ( px , py ) ; int modeIndex = pixelToMode . data [ index ] ; if ( modeIndex != - 1 ) { // it already knows the solution so stop searching Point2D_I32 modeP = modeLocation . get ( modeIndex ) ; this . modeX = modeP . x ; this . modeY = modeP . y ; return ; } } // move on to the next iteration float dx = peakX - cx ; float dy = peakY - cy ; cx = peakX ; cy = peakY ; meanColor ( sumColor , meanColor , total ) ; if ( Math . abs ( dx ) < convergenceTol && Math . abs ( dy ) < convergenceTol ) { break ; } } this . modeX = cx ; this . modeY = cy ;
public class WriteResources { /** * CreateFile Method . */ public StreamOut createFile ( String strFullFileName ) { } }
StreamOut streamOut = null ; try { File file = new File ( strFullFileName ) ; String pathName = file . getParent ( ) ; File fileDir = new File ( pathName ) ; fileDir . mkdirs ( ) ; streamOut = new StreamOut ( strFullFileName ) ; } catch ( IOException ex ) { ex . printStackTrace ( ) ; streamOut = null ; } return streamOut ;
public class MembershipHandlerImpl { /** * Migrates user memberships from old storage into new . * @ param oldUserNode * the node where user properties are stored ( from old structure ) * @ throws Exception */ void migrateMemberships ( Node oldUserNode ) throws Exception { } }
Session session = oldUserNode . getSession ( ) ; NodeIterator iterator = ( ( ExtendedNode ) oldUserNode ) . getNodesLazily ( ) ; while ( iterator . hasNext ( ) ) { Node oldMembershipNode = iterator . nextNode ( ) ; if ( oldMembershipNode . isNodeType ( MigrationTool . JOS_USER_MEMBERSHIP ) ) { String oldGroupUUID = utils . readString ( oldMembershipNode , MigrationTool . JOS_GROUP ) ; String oldMembershipTypeUUID = utils . readString ( oldMembershipNode , MembershipProperties . JOS_MEMBERSHIP_TYPE ) ; String userName = oldUserNode . getName ( ) ; String groupId = utils . readString ( session . getNodeByUUID ( oldGroupUUID ) , MigrationTool . JOS_GROUP_ID ) ; String membershipTypeName = session . getNodeByUUID ( oldMembershipTypeUUID ) . getName ( ) ; User user = service . getUserHandler ( ) . findUserByName ( userName ) ; Group group = service . getGroupHandler ( ) . findGroupById ( groupId ) ; MembershipType mt = service . getMembershipTypeHandler ( ) . findMembershipType ( membershipTypeName ) ; Membership existingMembership = findMembershipByUserGroupAndType ( userName , groupId , membershipTypeName ) ; if ( existingMembership != null ) { removeMembership ( existingMembership . getId ( ) , false ) ; } linkMembership ( user , group , mt , false ) ; } }
public class ConfigurableInterpreter { /** * Returns the result from applying the accumulator in all the nodes . * @ param nodes * flattened tree * @ return the result from applying the accumulator */ private final V process ( final Iterable < DiceNotationExpression > nodes ) { } }
accumulator . reset ( ) ; for ( final DiceNotationExpression current : nodes ) { if ( current instanceof BinaryOperation ) { accumulator . binaryOperation ( ( BinaryOperation ) current ) ; } else if ( current instanceof ConstantOperand ) { accumulator . constantOperand ( ( ConstantOperand ) current ) ; } else if ( current instanceof DiceOperand ) { accumulator . diceOperand ( ( DiceOperand ) current ) ; } else { LOGGER . warn ( "Unsupported expression of type {}" , current . getClass ( ) ) ; } } return accumulator . getValue ( ) ;
public class ArchetypeMerger { /** * / * Returns matching ( parent , specialized ) pairs of children of an attribute , in the order they should be present in * the flattened model . One of the element may be null in case of no specialization or extension . */ private List < Pair < CObject > > getChildPairs ( RmPath path , CAttribute parent , CAttribute specialized ) { } }
List < Pair < CObject > > result = new ArrayList < > ( ) ; for ( CObject parentChild : parent . getChildren ( ) ) { result . add ( new Pair < > ( parentChild , findSpecializedConstraintOfParentNode ( specialized , parentChild ) ) ) ; } for ( CObject specializedChild : specialized . getChildren ( ) ) { CObject parentChild = findParentConstraintOfSpecializedNode ( parent , specializedChild . getNodeId ( ) ) ; if ( parentChild == null ) { int index = getOrderIndex ( path , result , specializedChild ) ; if ( index >= 0 ) { result . add ( index , new Pair < > ( parentChild , specializedChild ) ) ; } else { result . add ( new Pair < > ( parentChild , specializedChild ) ) ; } } } return result ;
public class VisualizationManager { /** * Visualize a source to a file for a certain ebInterface version . * @ param eVersion * ebInterface version to use . May not be < code > null < / code > . * @ param aResource * Source resource . May not be < code > null < / code > . * @ param aDestinationFile * The file to write the result to . May not be < code > null < / code > . * @ return { @ link ESuccess } */ @ Nullable public static ESuccess visualizeToFile ( @ Nonnull final EEbInterfaceVersion eVersion , @ Nonnull final IReadableResource aResource , @ Nonnull final File aDestinationFile ) { } }
return visualize ( eVersion , TransformSourceFactory . create ( aResource ) , TransformResultFactory . create ( aDestinationFile ) ) ;
public class MaterializeKNNAndRKNNPreprocessor { /** * Extracts and removes the DBIDs in the given collections . * @ param extract a list of lists of DistanceResultPair to extract * @ param remove the ids to remove * @ return the DBIDs in the given collection */ protected ArrayDBIDs affectedkNN ( List < ? extends KNNList > extract , DBIDs remove ) { } }
HashSetModifiableDBIDs ids = DBIDUtil . newHashSet ( ) ; for ( KNNList drps : extract ) { for ( DBIDIter iter = drps . iter ( ) ; iter . valid ( ) ; iter . advance ( ) ) { ids . add ( iter ) ; } } ids . removeDBIDs ( remove ) ; // Convert back to array return DBIDUtil . newArray ( ids ) ;
public class DescribeReservedCacheNodesOfferingsResult { /** * A list of reserved cache node offerings . Each element in the list contains detailed information about one * offering . * < b > NOTE : < / b > This method appends the values to the existing list ( if any ) . Use * { @ link # setReservedCacheNodesOfferings ( java . util . Collection ) } or * { @ link # withReservedCacheNodesOfferings ( java . util . Collection ) } if you want to override the existing values . * @ param reservedCacheNodesOfferings * A list of reserved cache node offerings . Each element in the list contains detailed information about one * offering . * @ return Returns a reference to this object so that method calls can be chained together . */ public DescribeReservedCacheNodesOfferingsResult withReservedCacheNodesOfferings ( ReservedCacheNodesOffering ... reservedCacheNodesOfferings ) { } }
if ( this . reservedCacheNodesOfferings == null ) { setReservedCacheNodesOfferings ( new com . amazonaws . internal . SdkInternalList < ReservedCacheNodesOffering > ( reservedCacheNodesOfferings . length ) ) ; } for ( ReservedCacheNodesOffering ele : reservedCacheNodesOfferings ) { this . reservedCacheNodesOfferings . add ( ele ) ; } return this ;
public class MessageArgsUnitParser { /** * Select a factor set based on a name , used to format compact units . For example , * if compact = " bytes " we return a factor set DIGITAL _ BYTES . This set is then used * to produce the most compact form for a given value , e . g . " 1.2MB " , " 37TB " , etc . */ protected static UnitFactorSet selectFactorSet ( String compact , UnitConverter converter ) { } }
if ( compact != null ) { switch ( compact ) { case "angle" : case "angles" : return UnitFactorSets . ANGLE ; case "area" : return converter . areaFactors ( ) ; case "bit" : case "bits" : return UnitFactorSets . DIGITAL_BITS ; case "byte" : case "bytes" : return UnitFactorSets . DIGITAL_BYTES ; case "duration" : return UnitFactorSets . DURATION ; case "duration-large" : return UnitFactorSets . DURATION_LARGE ; case "duration-small" : return UnitFactorSets . DURATION_SMALL ; case "electric" : return UnitFactorSets . ELECTRIC ; case "energy" : return UnitFactorSets . ENERGY ; case "frequency" : return UnitFactorSets . FREQUENCY ; case "length" : return converter . lengthFactors ( ) ; case "mass" : return converter . massFactors ( ) ; case "power" : return UnitFactorSets . POWER ; case "volume" : return converter . volumeFactors ( ) ; case "liquid" : return converter . volumeLiquidFactors ( ) ; default : break ; } } return null ;
public class AWSCodePipelineClient { /** * Returns information about any jobs for AWS CodePipeline to act upon . PollForJobs is only valid for action types * with " Custom " in the owner field . If the action type contains " AWS " or " ThirdParty " in the owner field , the * PollForJobs action returns an error . * < important > * When this API is called , AWS CodePipeline returns temporary credentials for the Amazon S3 bucket used to store * artifacts for the pipeline , if the action requires access to that Amazon S3 bucket for input or output artifacts . * Additionally , this API returns any secret values defined for the action . * < / important > * @ param pollForJobsRequest * Represents the input of a PollForJobs action . * @ return Result of the PollForJobs operation returned by the service . * @ throws ValidationException * The validation was specified in an invalid format . * @ throws ActionTypeNotFoundException * The specified action type cannot be found . * @ sample AWSCodePipeline . PollForJobs * @ see < a href = " http : / / docs . aws . amazon . com / goto / WebAPI / codepipeline - 2015-07-09 / PollForJobs " target = " _ top " > AWS API * Documentation < / a > */ @ Override public PollForJobsResult pollForJobs ( PollForJobsRequest request ) { } }
request = beforeClientExecution ( request ) ; return executePollForJobs ( request ) ;
public class CopyObjectRequest { /** * Sets the AWS Key Management System parameters used to encrypt the object * on server side . */ public void setSSEAwsKeyManagementParams ( SSEAwsKeyManagementParams params ) { } }
if ( params != null && this . destinationSSECustomerKey != null ) { throw new IllegalArgumentException ( "Either SSECustomerKey or SSEAwsKeyManagementParams must not be set at the same time." ) ; } this . sseAwsKeyManagementParams = params ;
public class AbstractJMeterMojo { /** * Try to load the active maven proxy . */ protected void loadMavenProxy ( ) { } }
if ( null == settings ) { return ; } Proxy mvnProxy = settings . getActiveProxy ( ) ; if ( mvnProxy != null ) { ProxyConfiguration newProxyConfiguration = new ProxyConfiguration ( ) ; newProxyConfiguration . setHost ( mvnProxy . getHost ( ) ) ; newProxyConfiguration . setPort ( mvnProxy . getPort ( ) ) ; newProxyConfiguration . setUsername ( mvnProxy . getUsername ( ) ) ; newProxyConfiguration . setPassword ( mvnProxy . getPassword ( ) ) ; newProxyConfiguration . setHostExclusions ( mvnProxy . getNonProxyHosts ( ) ) ; proxyConfig = newProxyConfiguration ; getLog ( ) . info ( "Maven proxy loaded successfully" ) ; } else { getLog ( ) . warn ( "No maven proxy found, however useMavenProxy is set to true!" ) ; }
public class ProxyFactory { /** * Adds the following code to a delegating method : * < code > * if ( ! this . constructed ) return super . thisMethod ( ) * < / code > * This means that the proxy will not start to delegate to the underlying * bean instance until after the constructor has finished . */ protected void addConstructedGuardToMethodBody ( final ClassMethod classMethod , String className ) { } }
if ( ! useConstructedFlag ( ) ) { return ; } // now create the conditional final CodeAttribute cond = classMethod . getCodeAttribute ( ) ; cond . aload ( 0 ) ; cond . getfield ( classMethod . getClassFile ( ) . getName ( ) , CONSTRUCTED_FLAG_NAME , BytecodeUtils . BOOLEAN_CLASS_DESCRIPTOR ) ; // jump if the proxy constructor has finished BranchEnd jumpMarker = cond . ifne ( ) ; // generate the invokespecial call to the super class method // this is run when the proxy is being constructed cond . aload ( 0 ) ; cond . loadMethodParameters ( ) ; cond . invokespecial ( className , classMethod . getName ( ) , classMethod . getDescriptor ( ) ) ; cond . returnInstruction ( ) ; cond . branchEnd ( jumpMarker ) ;
public class BoltSendableResponseCallback { /** * 发送响应数据 * @ param response 响应 * @ param sofaException SofaRpcException */ protected void sendSofaResponse ( SofaResponse response , SofaRpcException sofaException ) { } }
try { if ( RpcInvokeContext . isBaggageEnable ( ) ) { BaggageResolver . carryWithResponse ( RpcInvokeContext . peekContext ( ) , response ) ; } asyncContext . sendResponse ( response ) ; } finally { if ( EventBus . isEnable ( ServerSendEvent . class ) ) { EventBus . post ( new ServerSendEvent ( request , response , sofaException ) ) ; } if ( EventBus . isEnable ( ServerEndHandleEvent . class ) ) { EventBus . post ( new ServerEndHandleEvent ( ) ) ; } }
public class BaseProfile { /** * generate package . html * @ param def Definition * @ param outputDir main or test * @ param subDir sub - directory */ void generatePackageInfo ( Definition def , String outputDir , String subDir ) { } }
try { FileWriter fw ; PackageInfoGen phGen ; if ( outputDir . equals ( "test" ) ) { fw = Utils . createTestFile ( "package-info.java" , def . getRaPackage ( ) , def . getOutputDir ( ) ) ; phGen = new PackageInfoGen ( ) ; } else { if ( subDir == null ) { fw = Utils . createSrcFile ( "package-info.java" , def . getRaPackage ( ) , def . getOutputDir ( ) ) ; phGen = new PackageInfoGen ( ) ; } else { fw = Utils . createSrcFile ( "package-info.java" , def . getRaPackage ( ) + "." + subDir , def . getOutputDir ( ) ) ; phGen = new PackageInfoGen ( subDir ) ; } } phGen . generate ( def , fw ) ; fw . close ( ) ; } catch ( IOException ioe ) { ioe . printStackTrace ( ) ; }
public class Data { /** * Sort a list * @ param < T > the type * @ param aVOs the value objects * @ return collection the sorted criteria */ public static < T > Collection < T > sortByCriteria ( Collection < T > aVOs ) { } }
final List < T > list ; if ( aVOs instanceof List ) list = ( List < T > ) aVOs ; else list = new ArrayList < T > ( aVOs ) ; Collections . sort ( list , new CriteriaComparator ( ) ) ; return list ;
public class ZipUtil { /** * Unpacks a single entry from a ZIP stream . * @ param is * ZIP stream . * @ param name * entry name . * @ return contents of the entry or < code > null < / code > if it was not found . */ public static byte [ ] unpackEntry ( InputStream is , String name ) { } }
ByteArrayUnpacker action = new ByteArrayUnpacker ( ) ; if ( ! handle ( is , name , action ) ) return null ; // entry not found return action . getBytes ( ) ;
public class FileSystem { /** * Decode the given file to obtain a string representation * which is compatible with the URL standard . * This function was introduced to have a work around * on the ' \ ' character on Windows operating system . * @ param file the file . * @ return the string representation of the file . * @ since 6.2 */ private static String fromFileStandardToURLStandard ( String file ) { } }
if ( file == null ) { return null ; } if ( isFileCompatibleWithURL == null ) { isFileCompatibleWithURL = Boolean . valueOf ( URL_PATH_SEPARATOR . equals ( File . separator ) ) ; } String filePath = file ; if ( ! isFileCompatibleWithURL ) { filePath = filePath . replaceAll ( Pattern . quote ( File . separator ) , Matcher . quoteReplacement ( URL_PATH_SEPARATOR ) ) ; } // Add root slash for Windows paths that are starting with a disk id . if ( Pattern . matches ( "^[a-zA-Z][:|].*$" , filePath ) ) { // $ NON - NLS - 1 $ filePath = URL_PATH_SEPARATOR + filePath ; } return filePath ;
public class SequenceConverter { /** * method to get for all peptides the sequence * @ param helm2notation * HELM2Notation * @ return rna sequences divided by white space * @ throws HELM2HandledException * if the polymer contains HELM2 features * @ throws PeptideUtilsException * if the polymer is not a peptide * @ throws org . helm . notation2 . exception . NotationException if notation is not valid * @ throws ChemistryException * if the Chemistry Engine can not be initialized */ public static String getPeptideSequenceFromNotation ( HELM2Notation helm2notation ) throws HELM2HandledException , PeptideUtilsException , org . helm . notation2 . exception . NotationException , ChemistryException { } }
List < PolymerNotation > polymers = helm2notation . getListOfPolymers ( ) ; StringBuffer sb = new StringBuffer ( ) ; for ( PolymerNotation polymer : polymers ) { sb . append ( PeptideUtils . getSequence ( polymer ) + " " ) ; } sb . setLength ( sb . length ( ) - 1 ) ; return sb . toString ( ) ;
public class CollidableUpdater { /** * IdentifiableListener */ @ Override public void notifyDestroyed ( Integer id ) { } }
enabled = false ; boxs . clear ( ) ; cacheColls . clear ( ) ; cacheRect . clear ( ) ;
public class DocBookBuilder { /** * Creates a dummy translated topic so that a book can be built using the same relationships as a normal build . * @ param topic The topic to create the dummy topic from . * @ param locale The locale to build the dummy translations for . * @ return The dummy translated topic . */ private TranslatedTopicWrapper createDummyTranslatedTopic ( final TopicWrapper topic , final LocaleWrapper locale ) { } }
final TranslatedTopicWrapper translatedTopic = translatedTopicProvider . newTranslatedTopic ( ) ; translatedTopic . setTopic ( topic ) ; translatedTopic . setId ( topic . getId ( ) * - 1 ) ; // If we get to this point then no translation exists or the default locale translation failed to be downloaded . translatedTopic . setTopicId ( topic . getId ( ) ) ; translatedTopic . setTopicRevision ( topic . getRevision ( ) ) ; translatedTopic . setTranslationPercentage ( 100 ) ; translatedTopic . setXml ( topic . getXml ( ) ) ; translatedTopic . setTags ( topic . getTags ( ) ) ; translatedTopic . setSourceURLs ( topic . getSourceURLs ( ) ) ; translatedTopic . setProperties ( topic . getProperties ( ) ) ; translatedTopic . setLocale ( locale ) ; translatedTopic . setTitle ( topic . getTitle ( ) ) ; return translatedTopic ;
public class PmiRegistry { /** * return the top level modules ' s PerfLevelDescriptor in String */ public static String getInstrumentationLevelString ( ) { } }
if ( disabled ) return null ; Map modules = moduleRoot . children ; if ( modules == null ) { return "" ; } else { PerfLevelDescriptor [ ] plds = new PerfLevelDescriptor [ modules . size ( ) ] ; Iterator values = modules . values ( ) . iterator ( ) ; int i = 0 ; while ( values . hasNext ( ) ) { PmiModule instance = ( ( ModuleItem ) values . next ( ) ) . getInstance ( ) ; plds [ i ++ ] = new PerfLevelDescriptor ( instance . getPath ( ) , instance . getInstrumentationLevel ( ) , instance . getModuleID ( ) ) ; } return PmiUtil . getStringFromPerfLevelSpecs ( plds ) ; }
public class CommerceAddressLocalServiceWrapper { /** * Deletes the commerce address from the database . Also notifies the appropriate model listeners . * @ param commerceAddress the commerce address * @ return the commerce address that was removed * @ throws PortalException */ @ Override public com . liferay . commerce . model . CommerceAddress deleteCommerceAddress ( com . liferay . commerce . model . CommerceAddress commerceAddress ) throws com . liferay . portal . kernel . exception . PortalException { } }
return _commerceAddressLocalService . deleteCommerceAddress ( commerceAddress ) ;
public class SocialTemplate { /** * Property names for Facebook - Override to customize * @ param props * @ return */ protected SocialProfile parseProfile ( Map < String , Object > props ) { } }
if ( ! props . containsKey ( "id" ) ) { throw new IllegalArgumentException ( "No id in profile" ) ; } SocialProfile profile = SocialProfile . with ( props ) . displayName ( "name" ) . first ( "first_name" ) . last ( "last_name" ) . id ( "id" ) . username ( "username" ) . profileUrl ( "link" ) . build ( ) ; profile . setThumbnailUrl ( getBaseUrl ( ) + "/" + profile . getId ( ) + "/picture" ) ; return profile ;
public class SecurityUtils { /** * Creates a { @ link SSLConnectionSocketFactory } which is careless about SSL * certificate checks . Use with caution ! * @ return */ @ SuppressWarnings ( "checkstyle:AbbreviationAsWordInName" ) public static SSLConnectionSocketFactory createUnsafeSSLConnectionSocketFactory ( ) { } }
try { final SSLContextBuilder builder = new SSLContextBuilder ( ) ; builder . loadTrustMaterial ( null , new TrustSelfSignedStrategy ( ) ) ; return new SSLConnectionSocketFactory ( builder . build ( ) , new NaiveHostnameVerifier ( ) ) ; } catch ( final Exception e ) { throw new IllegalStateException ( e ) ; }
public class CommandLineTools { /** * Tags text using the LanguageTool tagger , printing results to System . out . * @ param contents Text to tag . * @ param lt LanguageTool instance */ public static void tagText ( String contents , JLanguageTool lt ) throws IOException { } }
AnalyzedSentence analyzedText ; List < String > sentences = lt . sentenceTokenize ( contents ) ; for ( String sentence : sentences ) { analyzedText = lt . getAnalyzedSentence ( sentence ) ; System . out . println ( analyzedText ) ; }
public class ReportResourcesImpl { /** * Sets the publish status of a report and returns the new status , including the URLs of any * enabled publishing . * It mirrors to the following Smartsheet REST API method : PUT / reports / { id } / publish * Exceptions : * - InvalidRequestException : if there is any problem with the REST API request * - AuthorizationException : if there is any problem with the REST API authorization ( access token ) * - ResourceNotFoundException : if the resource can not be found * - ServiceUnavailableException : if the REST API service is not available ( possibly due to rate limiting ) * - SmartsheetRestException : if there is any other REST API related error occurred during the operation * - SmartsheetException : if there is any other error occurred during the operation * @ param id the ID of the report * @ param publish the ReportPublish object * @ return the updated ReportPublish ( note that if there is no such resource , this method will * throw ResourceNotFoundException rather than returning null ) * @ throws IllegalArgumentException if any argument is null or empty string * @ throws InvalidRequestException if there is any problem with the REST API request * @ throws AuthorizationException if there is any problem with the REST API authorization ( access token ) * @ throws ResourceNotFoundException if the resource cannot be found * @ throws ServiceUnavailableException if the REST API service is not available ( possibly due to rate limiting ) * @ throws SmartsheetException if there is any other error during the operation */ public ReportPublish updatePublishStatus ( long id , ReportPublish reportPublish ) throws SmartsheetException { } }
return this . updateResource ( "reports/" + id + "/publish" , ReportPublish . class , reportPublish ) ;
public class DescribeWorkingStorageResult { /** * An array of the gateway ' s local disk IDs that are configured as working storage . Each local disk ID is specified * as a string ( minimum length of 1 and maximum length of 300 ) . If no local disks are configured as working storage , * then the DiskIds array is empty . * < b > NOTE : < / b > This method appends the values to the existing list ( if any ) . Use * { @ link # setDiskIds ( java . util . Collection ) } or { @ link # withDiskIds ( java . util . Collection ) } if you want to override * the existing values . * @ param diskIds * An array of the gateway ' s local disk IDs that are configured as working storage . Each local disk ID is * specified as a string ( minimum length of 1 and maximum length of 300 ) . If no local disks are configured as * working storage , then the DiskIds array is empty . * @ return Returns a reference to this object so that method calls can be chained together . */ public DescribeWorkingStorageResult withDiskIds ( String ... diskIds ) { } }
if ( this . diskIds == null ) { setDiskIds ( new com . amazonaws . internal . SdkInternalList < String > ( diskIds . length ) ) ; } for ( String ele : diskIds ) { this . diskIds . add ( ele ) ; } return this ;
public class IdentifierToken { /** * Update the value represented by this token on the given object < code > object < / code > with * the value < code > value < / code > . * @ param object the object to update * @ param value the new value */ public void write ( Object object , Object value ) { } }
if ( object == null ) { String msg = "Can not update the identifier \"" + _identifier + "\" on a null value object." ; LOGGER . error ( msg ) ; throw new RuntimeException ( msg ) ; } if ( TRACE_ENABLED ) LOGGER . trace ( "Update property named \"" + _identifier + "\" on object of type: \"" + object . getClass ( ) . getName ( ) + "\"" ) ; if ( object instanceof Map ) mapUpdate ( ( Map ) object , _identifier , value ) ; else if ( object instanceof List ) { int i = parseIndex ( _identifier ) ; listUpdate ( ( List ) object , i , value ) ; } else if ( object . getClass ( ) . isArray ( ) ) { int i = parseIndex ( _identifier ) ; arrayUpdate ( object , i , value ) ; } else beanUpdate ( object , _identifier , value ) ;
public class Input { /** * Get the name of the axis with the given index * @ param controller The index of the controller to check * @ param axis The index of the axis to read * @ return The name of the specified axis */ public String getAxisName ( int controller , int axis ) { } }
return ( ( Controller ) controllers . get ( controller ) ) . getAxisName ( axis ) ;
public class PersistentTimerTaskHandler { /** * Retrieves the application specified information that is to be * delivered along with the Timer task expiration . A new copy of * the serializable object is returned every time . < p > * Note : the info object may be returned ( or null ) even if the application * has been uninstalled and the TimerTaskHandler object is not usable . * Generally , this method will only fail if the info object itself * could not be deserialized . < p > * @ return the application specified information . * @ throws TimerServiceException if the application specified information * object could not be deserialized . */ public Serializable getUserInfo ( ) throws TimerServiceException { } }
if ( userInfoBytes == null ) { if ( TraceComponent . isAnyTracingEnabled ( ) && tc . isDebugEnabled ( ) ) Tr . debug ( tc , "getUserInfo: null" ) ; return null ; } Serializable userInfo = null ; try { // Use timer application ClassLoader if installed ; otherwise use current thread context ClassLoader ClassLoader loader = EJSContainer . getClassLoader ( j2eeName ) ; if ( loader == null ) { loader = AccessController . doPrivileged ( new GetContextClassLoaderPrivileged ( ) ) ; } ByteArrayInputStream bais = new ByteArrayInputStream ( userInfoBytes ) ; EJBRuntime ejbRuntime = EJSContainer . getDefaultContainer ( ) . getEJBRuntime ( ) ; final ObjectInputStream objIstream = ejbRuntime . createObjectInputStream ( bais , loader ) ; userInfo = ( Serializable ) AccessController . doPrivileged ( new PrivilegedExceptionAction < Object > ( ) { @ Override public Object run ( ) throws ClassNotFoundException , IOException { return objIstream . readObject ( ) ; } } ) ; } catch ( PrivilegedActionException paex ) { // Most likely the application is no longer installed , and so Info class // could not be found , or there is a bug in the user info serialization code . // Let the caller decide whether to FFDC this scenario , since our MBean supports // accessing timers when the corresponding application is not installed . TimerServiceException ex = new TimerServiceException ( "Failure deserializing timer info object." , paex . getException ( ) ) ; if ( TraceComponent . isAnyTracingEnabled ( ) && tc . isDebugEnabled ( ) ) Tr . debug ( tc , "getUserInfo: " + ex ) ; throw ex ; } catch ( IOException ioex ) { FFDCFilter . processException ( ioex , CLASS_NAME + ".getUserInfo" , "406" ) ; TimerServiceException ex = new TimerServiceException ( "Failure deserializing timer info object." , ioex ) ; if ( TraceComponent . isAnyTracingEnabled ( ) && tc . isDebugEnabled ( ) ) Tr . debug ( tc , "getUserInfo: " + ex ) ; throw ex ; } if ( TraceComponent . isAnyTracingEnabled ( ) && tc . 
isDebugEnabled ( ) ) Tr . debug ( tc , "getUserInfo: " + Util . identity ( userInfo ) ) ; return userInfo ;
public class HtmlInput { /** * Capture events that occur anywhere on the page . * Note that event values will be relative to the page ( not the * rootElement ) { @ see # getRelativeX ( NativeEvent , Element ) } and { @ see * # getRelativeY ( NativeEvent , Element ) } . */ static void capturePageEvent ( String eventName , final EventHandler handler ) { } }
HtmlPlatform . captureEvent ( eventName , new EventHandler ( ) { @ Override public void handleEvent ( NativeEvent evt ) { handler . handleEvent ( evt ) ; } } ) ;
public class InternalTransaction {
    /**
     * Perform a number of additions, optimistic replace updates and deletes of
     * ManagedObjects but write a single log record with a list of objects to replace.
     * Optimistic replace does not require the ManagedObject to be in locked state;
     * it can be in Added, Locked, Replaced, Deleted, Ready states and be used by another
     * transaction. However, it is up to the code updating the object to correctly
     * reverse the changes at the time any backout of this transaction occurs. Because
     * the object is not locked, no before image of the object is kept. Unlike
     * standard replace we do not need to hold the update out of the ObjectStore
     * until commit time because the updating code is responsible for reversing the
     * effects of the update. These compensating changes can be made after the transaction has
     * prepared.
     * The advantage of this method is that all of the updated objects are handled
     * together, so that they all make it into the log for restart. If they were added
     * to the log separately we might fail and see some of them at restart and not others.
     * This allows objects such as lists to make several associated changes without
     * having to worry about compensating for the intermediate states.
     * If any exception occurs in preAdd, preOptimisticReplace or preDelete then the following
     * preAdd, preOptimisticReplace or preDelete callbacks are not made nor are any
     * postAdd, postOptimisticReplace or postDelete or optimisticReplaceLogged callbacks.
     *
     * @param managedObjectsToAdd list of ManagedObjects to add, or null if there are none.
     * @param managedObjectsToReplace list of ManagedObjects to be optimistically replaced,
     *            or null if there are none.
     * @param managedObjectsToDelete list of ManagedObjects to delete, or null if there are none.
     * @param tokensToNotify list of tokens to be notified once the replace is logged,
     *            or null if there are none.
     * @param transaction the external Transaction.
     * @param logSpaceReservedDelta extra log space the caller wants reserved until the transaction completes.
     * @throws ObjectManagerException
     */
    protected void optimisticReplace ( java . util . List managedObjectsToAdd , java . util . List managedObjectsToReplace , java . util . List managedObjectsToDelete , java . util . List tokensToNotify , Transaction transaction , long logSpaceReservedDelta ) throws ObjectManagerException { } }
final String methodName = "optimisticReplace";
if (Tracing.isAnyTracingEnabled() && trace.isEntryEnabled())
    trace.entry(this, cclass, methodName,
                new Object[] { managedObjectsToAdd, managedObjectsToReplace, managedObjectsToDelete,
                               tokensToNotify, transaction, new Long(logSpaceReservedDelta) });
// Keep a subset list of Tokens for persistent ManagedObjects to add.
java.util.List persistentTokensToAdd = new java.util.ArrayList();
// Keep a subset list of Tokens for persistent ManagedObjects to optimistically replace.
java.util.List persistentTokensToReplace = new java.util.ArrayList();
// Also update the list of the serializedBytes.
java.util.List persistentSerializedBytes = new java.util.ArrayList();
// A local Map of serializedBytes, so that we can add it to the set known to be logged,
// once we have written the transactionOptimisticReplaceLogRecord.
java.util.Map newLoggedSerializedBytes = new java.util.HashMap();
// Keep a subset list of persistent tokens to delete.
java.util.List persistentTokensToDelete = new java.util.ArrayList();
// Keep a subset list of persistent tokens to notify.
java.util.List persistentTokensToNotify = new java.util.ArrayList();
long logSequenceNumber; // Sequence number of the log record written.

// Phase 1: pre-callbacks and partitioning into persistent subsets.
// This try block catches any fatal error that causes us to fail to write the transaction log
// or reserve space in the ObjectStores or make the preAdd/preOptimisticReplace/preDelete
// calls to the ManagedObjects.
try {
    if (managedObjectsToAdd != null) {
        for (java.util.Iterator managedObjectIterator = managedObjectsToAdd.iterator(); managedObjectIterator.hasNext();) {
            ManagedObject managedObject = (ManagedObject) managedObjectIterator.next();
            // Make the ManagedObjects ready for an optimisticReplace,
            // give it a chance to blow up if anything is wrong.
            managedObject.preAdd(transaction);
            // Build the subset of persistent objects.
            if (managedObject.owningToken.getObjectStore().getPersistence()) {
                persistentTokensToAdd.add(managedObject.owningToken);
                // We have to allow for some other transaction to modify the object between the end
                // of this transaction and when we actually write it to the ObjectStore.
                // We address this by keeping a copy of the serialized image of the object when we
                // create the log record along with the logSequenceNumber when we write it.
                // A higher level lock, such as synchronizing on LinkedList, must protect against
                // our capturing the wrong serialized version of the ManagedObject.
                ObjectManagerByteArrayOutputStream serializedBytes = managedObject.getSerializedBytes();
                persistentSerializedBytes.add(serializedBytes);
                // Remember what we logged in case we commit this version of the ManagedObject.
                newLoggedSerializedBytes.put(managedObject.owningToken, serializedBytes);
            } else if (managedObject.owningToken.getObjectStore().getUsesSerializedForm()) {
                // We need to capture the serialized form of the ManagedObject for the ObjectStore.
                ObjectManagerByteArrayOutputStream serializedBytes = managedObject.getSerializedBytes();
                newLoggedSerializedBytes.put(managedObject.owningToken, serializedBytes);
            } // if ... persistent.
        } // for ... managedObjectsToAdd.
    } // if (managedObjectsToAdd != null).

    if (managedObjectsToReplace != null) {
        for (java.util.Iterator managedObjectIterator = managedObjectsToReplace.iterator(); managedObjectIterator.hasNext();) {
            ManagedObject managedObject = (ManagedObject) managedObjectIterator.next();
            // Make the ManagedObjects ready for an optimisticReplace,
            // give it a chance to blow up if anything is wrong.
            managedObject.preOptimisticReplace(transaction);
            // Build the subset of persistent objects.
            if (managedObject.owningToken.getObjectStore().getPersistence()) {
                persistentTokensToReplace.add(managedObject.owningToken);
                // We have to allow for some other transaction to modify the object between the end
                // of this transaction and when we actually write it to the ObjectStore.
                // We address this by keeping a copy of the serialized image of the object when we
                // create the log record along with the logSequenceNumber when we write it.
                // A higher level lock, such as synchronizing on LinkedList, must protect against
                // our capturing the wrong serialized version of the ManagedObject.
                ObjectManagerByteArrayOutputStream serializedBytes = managedObject.getSerializedBytes();
                persistentSerializedBytes.add(serializedBytes);
                // Remember what we logged in case we commit this version of the ManagedObject.
                newLoggedSerializedBytes.put(managedObject.owningToken, serializedBytes);
            } else if (managedObject.owningToken.getObjectStore().getUsesSerializedForm()) {
                // We need to capture the serialized form of the ManagedObject for the ObjectStore.
                ObjectManagerByteArrayOutputStream serializedBytes = managedObject.getSerializedBytes();
                newLoggedSerializedBytes.put(managedObject.owningToken, serializedBytes);
            } // if ... persistent.
        } // for ... managedObjectsToReplace.
    } // if (managedObjectsToReplace != null).

    if (managedObjectsToDelete != null) {
        for (java.util.Iterator managedObjectIterator = managedObjectsToDelete.iterator(); managedObjectIterator.hasNext();) {
            ManagedObject managedObject = (ManagedObject) managedObjectIterator.next();
            // Make the ManagedObject ready for deletion,
            // give it a chance to blow up if anything is wrong.
            managedObject.preDelete(transaction);
            // Build the subset of persistent objects.
            if (managedObject.owningToken.getObjectStore().getPersistence()) {
                persistentTokensToDelete.add(managedObject.owningToken);
            } // if ... persistent.
        } // for ... managedObjectsToDelete.
    } // if (managedObjectsToDelete != null).

    if (tokensToNotify != null) {
        for (java.util.Iterator tokenIterator = tokensToNotify.iterator(); tokenIterator.hasNext();) {
            Token token = (Token) tokenIterator.next();
            // Build the subset of persistent objects.
            if (token.getObjectStore().getPersistence()) {
                persistentTokensToNotify.add(token);
            } // if (token.getObjectStore().persistent).
        } // for ... tokensToNotify.
    } // if (tokensToNotify != null).
} catch (ObjectManagerException exception) {
    // The write was not done.
    // No FFDC Code Needed.
    ObjectManager.ffdc.processException(this, cclass, methodName, exception, "1:1305:1.41");
    postOptmisticReplace(managedObjectsToAdd, managedObjectsToReplace, managedObjectsToDelete, transaction);
    if (Tracing.isAnyTracingEnabled() && trace.isEntryEnabled())
        trace.exit(this, cclass, methodName, new Object[] { exception });
    throw exception;
} // catch (ObjectManagerException...

// Phase 2: log the change and drive the post-callbacks, atomically with respect
// to transaction completion.
synchronized (this) {
    // To defend against two application threads completing the same transaction and trying to
    // continue with it at the same time we check that the Transaction still refers to this one,
    // now that we are synchronized on the InternalTransaction.
    if (transaction.internalTransaction != this) {
        postOptmisticReplace(managedObjectsToAdd, managedObjectsToReplace, managedObjectsToDelete, transaction);
        if (Tracing.isAnyTracingEnabled() && trace.isEntryEnabled())
            trace.exit(this, cclass, methodName,
                       new Object[] { "via InvalidTransactionException", transaction.internalTransaction });
        // Same behaviour as if the transaction was completed and replaced by
        // objectManagerState.dummyInternalTransaction.
        throw new InvalidStateException(this, InternalTransaction.stateTerminated,
                                        InternalTransaction.stateNames[InternalTransaction.stateTerminated]);
    } // if (transaction.internalTransaction != this).

    try {
        // Is data logging required for this object in order to recover it?
        if (!persistentTokensToAdd.isEmpty()
            || !persistentTokensToReplace.isEmpty()
            || !persistentTokensToDelete.isEmpty()
            || !persistentTokensToNotify.isEmpty()) {
            testState(nextStateForInvolveOptimisticPersistentObject);
            // Over estimate of reserved space. If we replace a ManagedObject previously involved
            // in this Transaction we will have already reserved some space.
            logSpaceReservedDelta = logSpaceReservedDelta
                                    + (+persistentTokensToAdd.size()
                                       + persistentTokensToReplace.size()
                                       + persistentTokensToDelete.size()
                                       + persistentTokensToNotify.size()) * Token.maximumSerializedSize();
            if (logSpaceReserved == 0)
                logSpaceReservedDelta = logSpaceReservedDelta + logSpaceReservedOverhead;
            TransactionOptimisticReplaceLogRecord transactionOptimisticReplaceLogRecord =
                new TransactionOptimisticReplaceLogRecord(this,
                                                          persistentTokensToAdd,
                                                          persistentTokensToReplace,
                                                          persistentSerializedBytes,
                                                          persistentTokensToDelete,
                                                          persistentTokensToNotify);
            // If we throw an exception in here no state change has been done.
            logSequenceNumber = objectManagerState.logOutput.writeNext(transactionOptimisticReplaceLogRecord,
                                                                       logSpaceReservedDelta, true, false);
            // The previous testState() call means we should not fail in here.
            setState(nextStateForInvolveOptimisticPersistentObject); // Make the state change.
            logSpaceReserved = logSpaceReserved + logSpaceReservedDelta;
        } else {
            // No persistent tokens mentioned then.
            testState(nextStateForInvolveOptimisticNonPersistentObject);
            setState(nextStateForInvolveOptimisticNonPersistentObject);
            logSequenceNumber = objectManagerState.getDummyLogSequenceNumber();
        } // else non Persistent.
    } catch (ObjectManagerException exception) {
        // The write was not done.
        // No FFDC Code Needed.
        ObjectManager.ffdc.processException(this, cclass, "optimisticReplace", exception, "1:1371:1.41");
        postOptmisticReplace(managedObjectsToAdd, managedObjectsToReplace, managedObjectsToDelete, transaction);
        if (Tracing.isAnyTracingEnabled() && trace.isEntryEnabled())
            trace.exit(this, cclass, methodName, new Object[] { exception });
        throw exception;
    } // catch (ObjectManagerException...

    // Any logging must be done before the following operations on the ManagedObjects as we will
    // redrive these after recovery from the log on a restart.
    // We have now successfully logged the serialized bytes.
    loggedSerializedBytes.putAll(newLoggedSerializedBytes);

    // Drive the postAdd method for the ManagedObjects.
    if (managedObjectsToAdd != null) {
        for (java.util.Iterator managedObjectIterator = managedObjectsToAdd.iterator(); managedObjectIterator.hasNext();) {
            ManagedObject managedObject = (ManagedObject) managedObjectIterator.next();
            // The ManagedObject is now included in the transaction.
            includedManagedObjects.put(managedObject.owningToken, managedObject);
            // Remember which logSequenceNumber when we last logged it.
            logSequenceNumbers.put(managedObject.owningToken, new Long(logSequenceNumber));
            managedObjectSequenceNumbers.put(managedObject.owningToken, new Long(managedObject.getUpdateSequence()));
            managedObject.postAdd(transaction, true);
        } // for ... managedObjectsToAdd.
    } // if (managedObjectsToAdd != null).

    if (managedObjectsToReplace != null) {
        for (java.util.Iterator managedObjectIterator = managedObjectsToReplace.iterator(); managedObjectIterator.hasNext();) {
            ManagedObject managedObject = (ManagedObject) managedObjectIterator.next();
            // The ManagedObject is now included in the transaction.
            includedManagedObjects.put(managedObject.owningToken, managedObject);
            // Remember which logSequenceNumber when we last logged it.
            logSequenceNumbers.put(managedObject.owningToken, new Long(logSequenceNumber));
            managedObjectSequenceNumbers.put(managedObject.owningToken, new Long(managedObject.getUpdateSequence()));
            // See comment in ManagedObject.optimisticReplaceCommit().
            // In principle the managed object here could
            // also have been deleted by a subsequent transaction. But so far I have not seen this happen.
            managedObject.postOptimisticReplace(transaction, true);
        } // for ... managedObjectsToReplace.
    } // if (managedObjectsToReplace != null).

    // Drive the postDelete method for the ManagedObjects.
    if (managedObjectsToDelete != null) {
        for (java.util.Iterator managedObjectIterator = managedObjectsToDelete.iterator(); managedObjectIterator.hasNext();) {
            ManagedObject managedObject = (ManagedObject) managedObjectIterator.next();
            // The ManagedObject is now included in the transaction.
            includedManagedObjects.put(managedObject.owningToken, managedObject);
            // Remember which logSequenceNumber when we last logged it.
            logSequenceNumbers.put(managedObject.owningToken, new Long(logSequenceNumber));
            managedObjectSequenceNumbers.put(managedObject.owningToken, new Long(managedObject.getUpdateSequence()));
            managedObject.postDelete(transaction, true);
        } // for ... managedObjectsToDelete.
    } // if (managedObjectsToDelete != null).

    // Drive the optimisticReplaceLogged method for any ManagedObjects to Notify.
    if (tokensToNotify != null) {
        for (java.util.Iterator tokenIterator = tokensToNotify.iterator(); tokenIterator.hasNext();) {
            Token token = (Token) tokenIterator.next();
            if (token.getObjectStore().getPersistence())
                allPersistentTokensToNotify.add(token);
            token.getManagedObject().optimisticReplaceLogged(transaction);
        } // for ... tokensToNotify.
    } // if (tokensToReplace != null).
} // synchronized (this).
if (Tracing.isAnyTracingEnabled() && trace.isEntryEnabled())
    trace.exit(this, cclass, methodName);
public class CdnClient { /** * Set HTTPS with certain configuration . * @ param domain Name of the domain . * @ param https The configuration of HTTPS . */ public void setHttpsConfig ( String domain , HttpsConfig https ) { } }
SetHttpsConfigRequest request = new SetHttpsConfigRequest ( ) . withDomain ( domain ) . withHttps ( https ) ; setHttpsConfig ( request ) ;
public class FactoryInterestPointAlgs { /** * Creates a SIFT detector */ public static SiftDetector sift ( @ Nullable ConfigSiftScaleSpace configSS , @ Nullable ConfigSiftDetector configDetector ) { } }
if ( configSS == null ) configSS = new ConfigSiftScaleSpace ( ) ; if ( configDetector == null ) configDetector = new ConfigSiftDetector ( ) ; NonMaxLimiter nonmax = FactoryFeatureExtractor . nonmaxLimiter ( configDetector . extract , configDetector . maxFeaturesPerScale ) ; SiftScaleSpace ss = new SiftScaleSpace ( configSS . firstOctave , configSS . lastOctave , configSS . numScales , configSS . sigma0 ) ; return new SiftDetector ( ss , configDetector . edgeR , nonmax ) ;
public class Alignments { /** * Return the gaps in the specified gapped symbol list as 0 - based [ closed , open ) ranges . * @ param gappedSymbols gapped symbol list , must not be null * @ return the gaps in the specified gapped symbol list as 0 - based [ closed , open ) ranges */ public static List < Range < Long > > gaps ( final GappedSymbolList gappedSymbols ) { } }
checkNotNull ( gappedSymbols ) ; List < Range < Long > > gaps = new ArrayList < Range < Long > > ( ) ; int gapStart = - 1 ; for ( int i = 1 , length = gappedSymbols . length ( ) + 1 ; i < length ; i ++ ) { if ( isGapSymbol ( gappedSymbols . symbolAt ( i ) ) ) { if ( gapStart < 0 ) { gapStart = i ; } } else { if ( gapStart > 0 ) { // biojava coordinates are 1 - based gaps . add ( Range . closedOpen ( Long . valueOf ( gapStart - 1L ) , Long . valueOf ( i - 1L ) ) ) ; gapStart = - 1 ; } } } if ( gapStart > 0 ) { gaps . add ( Range . closedOpen ( Long . valueOf ( gapStart - 1L ) , Long . valueOf ( gappedSymbols . length ( ) ) ) ) ; } return gaps ;
public class GSMPImpl { /**
     * <!-- begin-user-doc -->
     * <!-- end-user-doc -->
     * @generated
     */ @Override public void eUnset ( int featureID ) { } }
// EMF-generated unsetter: restore the addressed feature to its default value.
switch (featureID) {
    case AfplibPackage.GSMP__PREC:
        // Reset PREC to its generated static default and return.
        setPREC(PREC_EDEFAULT);
        return;
}
// Any other feature is handled by the superclass.
super.eUnset(featureID);
public class CPInstancePersistenceImpl { /**
     * Returns a range of all the cp instances where companyId = &#63;.
     *
     * <p>
     * Useful when paginating results. Returns a maximum of <code>end - start</code> instances.
     * <code>start</code> and <code>end</code> are not primary keys, they are indexes in the result
     * set; <code>0</code> refers to the first result. Setting both to
     * {@link QueryUtil#ALL_POS} returns the full result set. Without an order-by comparator and
     * with pagination, the default ORDER BY logic from {@link CPInstanceModelImpl} applies;
     * without either, results are sorted by primary key ascending.
     * </p>
     *
     * @param companyId the company ID
     * @param start the lower bound of the range of cp instances
     * @param end the upper bound of the range of cp instances (not inclusive)
     * @return the range of matching cp instances
     */ @ Override public List < CPInstance > findByCompanyId ( long companyId , int start , int end ) { } }
// Delegate to the four-argument overload with no ORDER BY comparator so the
// default ordering described in the javadoc is applied.
return findByCompanyId(companyId, start, end, null);
public class MatchPatternIterator { /**
     * Initialize the context values for this expression after it is cloned.
     *
     * @param context The XPath runtime context node handle for this transformation.
     * @param environment The environment object (the XPath runtime context).
     */ public void setRoot ( int context , Object environment ) { } }
super.setRoot(context, environment);
// After the superclass has re-initialized the DTM for the new root context,
// re-acquire the axis traverser for this iterator's super axis from m_cdtm.
m_traverser = m_cdtm.getAxisTraverser(m_superAxis);
public class InmemQueue { /** * Puts a message to the queue buffer . * @ param msg * @ throws QueueException . QueueIsFull * if the ring buffer is full */ protected void putToQueue ( IQueueMessage < ID , DATA > msg ) throws QueueException . QueueIsFull { } }
if ( ! queue . offer ( msg ) ) { throw new QueueException . QueueIsFull ( getBoundary ( ) ) ; }
public class BaseBigtableTableAdminClient { /** * Lists all snapshots associated with the specified cluster . * < p > Note : This is a private alpha release of Cloud Bigtable snapshots . This feature is not * currently available to most Cloud Bigtable customers . This feature might be changed in * backward - incompatible ways and is not recommended for production use . It is not subject to any * SLA or deprecation policy . * < p > Sample code : * < pre > < code > * try ( BaseBigtableTableAdminClient baseBigtableTableAdminClient = BaseBigtableTableAdminClient . create ( ) ) { * ClusterName parent = ClusterName . of ( " [ PROJECT ] " , " [ INSTANCE ] " , " [ CLUSTER ] " ) ; * for ( Snapshot element : baseBigtableTableAdminClient . listSnapshots ( parent . toString ( ) ) . iterateAll ( ) ) { * / / doThingsWith ( element ) ; * < / code > < / pre > * @ param parent The unique name of the cluster for which snapshots should be listed . Values are * of the form ` projects / & lt ; project & gt ; / instances / & lt ; instance & gt ; / clusters / & lt ; cluster & gt ; ` . * Use ` & lt ; cluster & gt ; = ' - ' ` to list snapshots for all clusters in an instance , e . g . , * ` projects / & lt ; project & gt ; / instances / & lt ; instance & gt ; / clusters / - ` . * @ throws com . google . api . gax . rpc . ApiException if the remote call fails */ public final ListSnapshotsPagedResponse listSnapshots ( String parent ) { } }
ListSnapshotsRequest request = ListSnapshotsRequest . newBuilder ( ) . setParent ( parent ) . build ( ) ; return listSnapshots ( request ) ;
public class FacesServlet { /**
     * <p>Acquire the factory instances we will require.</p>
     *
     * @throws ServletException if, for any reason, the startup of
     * this Faces application failed. This includes errors in the
     * config file that is parsed before or during the processing of
     * this <code>init()</code> method.
     */ public void init ( ServletConfig servletConfig ) throws ServletException { } }
// Save our ServletConfig instance
this.servletConfig = servletConfig;
// Acquire our FacesContextFactory instance
try {
    facesContextFactory = (FacesContextFactory) FactoryFinder.getFactory(FactoryFinder.FACES_CONTEXT_FACTORY);
} catch (FacesException e) {
    // Factory lookup failed: log the localized message with the root cause and
    // mark the servlet unavailable.
    ResourceBundle rb = LOGGER.getResourceBundle();
    String msg = rb.getString("severe.webapp.facesservlet.init_failed");
    Throwable rootCause = (e.getCause() != null) ? e.getCause() : e;
    LOGGER.log(Level.SEVERE, msg, rootCause);
    throw new UnavailableException(msg);
}
// Acquire our Lifecycle instance
try {
    LifecycleFactory lifecycleFactory = (LifecycleFactory) FactoryFinder.getFactory(FactoryFinder.LIFECYCLE_FACTORY);
    String lifecycleId;
    // First look in the servlet init-param set
    if (null == (lifecycleId = servletConfig.getInitParameter(LIFECYCLE_ID_ATTR))) {
        // If not found, look in the context-param set
        lifecycleId = servletConfig.getServletContext().getInitParameter(LIFECYCLE_ID_ATTR);
    }
    // Fall back to the default lifecycle when no id was configured anywhere.
    if (lifecycleId == null) {
        lifecycleId = LifecycleFactory.DEFAULT_LIFECYCLE;
    }
    lifecycle = lifecycleFactory.getLifecycle(lifecycleId);
    initHttpMethodValidityVerification();
} catch (FacesException e) {
    // Re-throw as-is when there is no underlying cause; otherwise wrap the
    // cause in a ServletException as the servlet contract expects.
    Throwable rootCause = e.getCause();
    if (rootCause == null) {
        throw e;
    } else {
        throw new ServletException(e.getMessage(), rootCause);
    }
}
public class ElementPlugin { /** * Forward command events to first level children of the container . * @ param event The command event . */ @ EventHandler ( "command" ) private void onCommand ( CommandEvent event ) { } }
if ( isEnabled ( ) ) { for ( BaseComponent child : container . getChildren ( ) ) { EventUtil . send ( event , child ) ; if ( event . isStopped ( ) ) { break ; } } }
public class RelativeToEasterSundayParser { /** * Parses relative to Easter Sunday holidays . */ public void parse ( final int nYear , final HolidayMap aHolidayMap , final Holidays aConfig ) { } }
for ( final RelativeToEasterSunday aDay : aConfig . getRelativeToEasterSunday ( ) ) { if ( ! isValid ( aDay , nYear ) ) continue ; final ChronoLocalDate aEasterSunday = getEasterSunday ( nYear , aDay . getChronology ( ) ) ; aEasterSunday . plus ( aDay . getDays ( ) , ChronoUnit . DAYS ) ; final String sPropertiesKey = "christian." + aDay . getDescriptionPropertiesKey ( ) ; addChrstianHoliday ( aEasterSunday , sPropertiesKey , XMLHolidayHelper . getType ( aDay . getLocalizedType ( ) ) , aHolidayMap ) ; }
public class PushGateway { /** * Deletes metrics from the Pushgateway . * This uses the DELETE HTTP method . * @ deprecated use { @ link # delete ( String , Map ) } */ @ Deprecated public void delete ( String job , String instance ) throws IOException { } }
delete ( job , Collections . singletonMap ( "instance" , instance ) ) ;
public class JaxbSerializer { /**
     * {@inheritDoc}
     *
     * Marshals the given object to XML via JAXB and returns the bytes wrapped
     * in a ByteBuffer; returns null for a null input.
     */ @ Override public ByteBuffer toByteBuffer ( Object obj ) { } }
if (obj == null) {
    return null;
}
ByteArrayOutputStream buffer = new ByteArrayOutputStream();
try {
    XMLStreamWriter writer = createStreamWriter(buffer);
    // marshaller is presumably a per-thread holder (e.g. ThreadLocal) of a JAXB
    // Marshaller -- TODO confirm against the field declaration.
    marshaller.get().marshal(obj, writer);
    writer.flush();
    writer.close();
    // NOTE(review): the writer is not closed on the exception paths below; harmless
    // for an in-memory buffer, but worth confirming no native resources are held.
} catch (JAXBException e) {
    // The object is not bindable with the configured JAXBContext.
    throw new HectorSerializationException("Object to serialize " + obj + " does not seem compatible with the configured JaxbContext;" + " note this Serializer works only with JAXBable objects.", e);
} catch (XMLStreamException e) {
    throw new HectorSerializationException("Exception occurred writing XML stream.", e);
}
return ByteBuffer.wrap(buffer.toByteArray());
public class CompoundObjectType { /** * Answer a list of types of this CompoundObjectType . * @ param noAbstractTypes if true , no abstract types and no interfaces are answered , * else all types of the CompoundObjectType are answered . * @ return a list of java Classes */ public List < Class < ? > > getTypes ( boolean noAbstractTypes ) { } }
List < Class < ? > > typeList = new ArrayList < Class < ? > > ( ) ; this . addType ( typeList , noAbstractTypes ) ; return typeList ;
public class SearchTransitGatewayRoutesResult { /** * Information about the routes . * < b > NOTE : < / b > This method appends the values to the existing list ( if any ) . Use * { @ link # setRoutes ( java . util . Collection ) } or { @ link # withRoutes ( java . util . Collection ) } if you want to override the * existing values . * @ param routes * Information about the routes . * @ return Returns a reference to this object so that method calls can be chained together . */ public SearchTransitGatewayRoutesResult withRoutes ( TransitGatewayRoute ... routes ) { } }
if ( this . routes == null ) { setRoutes ( new com . amazonaws . internal . SdkInternalList < TransitGatewayRoute > ( routes . length ) ) ; } for ( TransitGatewayRoute ele : routes ) { this . routes . add ( ele ) ; } return this ;
public class DefaultArtifactoryClient { /** * / / / / / Helpers */ private ResponseEntity < String > makeRestCall ( String instanceUrl , String suffix ) { } }
ResponseEntity < String > response = null ; try { response = restOperations . exchange ( joinUrl ( instanceUrl , artifactorySettings . getEndpoint ( ) , suffix ) , HttpMethod . GET , new HttpEntity < > ( createHeaders ( instanceUrl ) ) , String . class ) ; } catch ( RestClientException re ) { LOGGER . error ( "Error with REST url: " + joinUrl ( instanceUrl , artifactorySettings . getEndpoint ( ) , suffix ) ) ; LOGGER . error ( re . getMessage ( ) ) ; } return response ;
public class CmsSolrQuery { /** * Sets the text . < p > * @ param text the text to set */ public void setText ( String text ) { } }
m_text = text ; if ( CmsStringUtil . isNotEmptyOrWhitespaceOnly ( text ) ) { setQuery ( createTextQuery ( text ) ) ; }
public class CoreSynonymDictionaryEx { /**
     * Convert a segmentation result into a list of synonym entries.
     *
     * @param sentence the segmented sentence (list of terms)
     * @param withUndefinedItem whether to keep words that are not in the dictionary
     * @return list of synonym id arrays, one per retained term
     */ public static List < Long [ ] > convert ( List < Term > sentence , boolean withUndefinedItem ) { } }
List<Long[]> synonymItemList = new ArrayList<Long[]>(sentence.size());
for (Term term : sentence) {
    // Skip terms without a part-of-speech tag (original comment: "remove stop words").
    if (term.nature == null) continue;
    String nature = term.nature.toString();
    char firstChar = nature.charAt(0);
    switch (firstChar) {
        case 'm': {
            // Numerals: keep only those written entirely in Chinese characters.
            if (!TextUtility.isAllChinese(term.word)) continue;
        }
        break;
        case 'w': {
            // Punctuation is always dropped.
            continue;
        }
    }
    // Stop words are dropped.
    if (CoreStopWordDictionary.contains(term.word)) continue;
    Long[] item = get(term.word);
    // logger.trace("{} {}", wordResult.word, Arrays.toString(item));
    if (item == null) {
        if (withUndefinedItem) {
            // Out-of-dictionary word: use a sentinel id when the caller asked to keep them.
            item = new Long[]{Long.MAX_VALUE / 3};
            synonymItemList.add(item);
        }
    } else {
        synonymItemList.add(item);
    }
}
return synonymItemList;
public class Element { /** * Search for the attribute " id " and return the value . * @ return the id of this element or null when not found */ public String getElementId ( ) { } }
for ( Entry < String , String > attribute : attributes . entrySet ( ) ) { if ( attribute . getKey ( ) . equalsIgnoreCase ( "id" ) ) { return attribute . getValue ( ) ; } } return null ;
public class ComponentAccess { /**
     * Call a method by Annotation.
     *
     * @param o the object to call.
     * @param ann the annotation
     * @param lazy if true, a missing annotation is OK; if false
     * the annotation has to be present or a Runtime exception is thrown.
     */ public static void callAnnotated ( Object o , Class < ? extends Annotation > ann , boolean lazy ) { } }
try {
    // Locate the annotated method and invoke it with no arguments.
    getMethodOfInterest(o, ann).invoke(o);
} catch (IllegalAccessException ex) {
    throw new RuntimeException(ex);
} catch (InvocationTargetException ex) {
    // Unwrap and rethrow the underlying cause raised by the invoked method.
    throw new RuntimeException(ex.getCause());
} catch (IllegalArgumentException ex) {
    // Presumably thrown by getMethodOfInterest when no method carries the
    // annotation -- TODO confirm. Tolerated when lazy is true; fatal otherwise.
    if (!lazy) {
        throw new RuntimeException(ex.getMessage());
    }
}
public class HttpObjectEncoder { /** * Writes an { @ link HttpData } . */ public final ChannelFuture writeData ( int id , int streamId , HttpData data , boolean endStream ) { } }
assert eventLoop ( ) . inEventLoop ( ) ; if ( closed ) { ReferenceCountUtil . safeRelease ( data ) ; return newClosedSessionFuture ( ) ; } return doWriteData ( id , streamId , data , endStream ) ;
public class ViewDragHelper { /** * Check if we ' ve crossed a reasonable touch slop for the given child view . * If the child cannot be dragged along the horizontal or vertical axis , motion * along that axis will not count toward the slop check . * @ param child Child to check * @ param dx Motion since initial position along X axis * @ param dy Motion since initial position along Y axis * @ return true if the touch slop has been crossed */ private boolean checkTouchSlop ( View child , float dx , float dy ) { } }
if ( child == null ) { return false ; } final boolean checkHorizontal = mCallback . getViewHorizontalDragRange ( child ) > 0 ; final boolean checkVertical = mCallback . getViewVerticalDragRange ( child ) > 0 ; if ( checkHorizontal && checkVertical ) { return dx * dx + dy * dy > mTouchSlop * mTouchSlop ; } else if ( checkHorizontal ) { return Math . abs ( dx ) > mTouchSlop ; } else if ( checkVertical ) { return Math . abs ( dy ) > mTouchSlop ; } return false ;
public class OffsetDateTime { /** * Returns a copy of this { @ code OffsetDateTime } with the specified number of days subtracted . * This method subtracts the specified amount from the days field decrementing the * month and year fields as necessary to ensure the result remains valid . * The result is only invalid if the maximum / minimum year is exceeded . * For example , 2008-12-31 minus one day would result in 2009-01-01. * This instance is immutable and unaffected by this method call . * @ param days the days to subtract , may be negative * @ return an { @ code OffsetDateTime } based on this date - time with the days subtracted , not null * @ throws DateTimeException if the result exceeds the supported date range */ public OffsetDateTime minusDays ( long days ) { } }
return ( days == Long . MIN_VALUE ? plusDays ( Long . MAX_VALUE ) . plusDays ( 1 ) : plusDays ( - days ) ) ;
// Review notes on the implementation below:
// - First pass: for methods that cannot be overridden (final/static/private/ctor of a
//   non-anonymous class, or in a final/anonymous class), collect declared checked
//   exceptions (anything not derived from RuntimeException) and, after the opcode scan
//   (super.visitCode), report BED_BOGUS_EXCEPTION_DECLARATION for those still unused.
// - Second pass: for every pair of declared exceptions, report
//   BED_HIERARCHICAL_EXCEPTION_DECLARATION when one derives from the other (unless the
//   parent is java.lang.Exception itself), then return.
// - StopOpcodeParsingException is the scanner's early-exit signal, deliberately swallowed.
public class BogusExceptionDeclaration { /** * implements the visitor to see if the method declares that it throws any checked exceptions . * @ param obj * the context object of the currently parsed code block */ @ Override public void visitCode ( Code obj ) { } }
Method method = getMethod ( ) ; if ( method . isSynthetic ( ) ) { return ; } declaredCheckedExceptions . clear ( ) ; stack . resetForMethodEntry ( this ) ; ExceptionTable et = method . getExceptionTable ( ) ; if ( et != null ) { if ( classIsFinal || classIsAnonymous || method . isStatic ( ) || method . isPrivate ( ) || method . isFinal ( ) || ( ( Values . CONSTRUCTOR . equals ( method . getName ( ) ) && ! isAnonymousInnerCtor ( method , getThisClass ( ) ) ) ) ) { String [ ] exNames = et . getExceptionNames ( ) ; for ( String exName : exNames ) { try { JavaClass exCls = Repository . lookupClass ( exName ) ; if ( ! exCls . instanceOf ( runtimeExceptionClass ) ) { declaredCheckedExceptions . add ( exName ) ; } } catch ( ClassNotFoundException cnfe ) { bugReporter . reportMissingClass ( cnfe ) ; } } if ( ! declaredCheckedExceptions . isEmpty ( ) ) { try { super . visitCode ( obj ) ; if ( ! declaredCheckedExceptions . isEmpty ( ) ) { BugInstance bi = new BugInstance ( this , BugType . BED_BOGUS_EXCEPTION_DECLARATION . name ( ) , NORMAL_PRIORITY ) . addClass ( this ) . addMethod ( this ) . addSourceLine ( this , 0 ) ; for ( String ex : declaredCheckedExceptions ) { bi . addString ( ex . replaceAll ( "/" , "." ) ) ; } bugReporter . reportBug ( bi ) ; } } catch ( StopOpcodeParsingException e ) { // no exceptions left } } } String [ ] exNames = et . getExceptionNames ( ) ; for ( int i = 0 ; i < ( exNames . length - 1 ) ; i ++ ) { try { JavaClass exCls1 = Repository . lookupClass ( exNames [ i ] ) ; for ( int j = i + 1 ; j < exNames . length ; j ++ ) { JavaClass exCls2 = Repository . lookupClass ( exNames [ j ] ) ; JavaClass childEx ; JavaClass parentEx ; if ( exCls1 . instanceOf ( exCls2 ) ) { childEx = exCls1 ; parentEx = exCls2 ; } else if ( exCls2 . instanceOf ( exCls1 ) ) { childEx = exCls2 ; parentEx = exCls1 ; } else { continue ; } if ( ! parentEx . equals ( exceptionClass ) ) { bugReporter . reportBug ( new BugInstance ( this , BugType . 
BED_HIERARCHICAL_EXCEPTION_DECLARATION . name ( ) , NORMAL_PRIORITY ) . addClass ( this ) . addMethod ( this ) . addString ( childEx . getClassName ( ) + " derives from " + parentEx . getClassName ( ) ) ) ; return ; } } } catch ( ClassNotFoundException cnfe ) { bugReporter . reportMissingClass ( cnfe ) ; } } }
// Review notes on the implementation below:
// - Three paths: (1) --input FILE is currently unsupported (the old VfsOld script
//   runner is commented out and an UnsupportedOperationException is thrown),
//   (2) an interactive console prints the version banner and enters doConsole(),
//   (3) no console available -> batch mode via doBatch(args).
// - NOTE(review): the large commented-out block keeps the brace structure balanced;
//   be careful when re-enabling it.
public class ShellCommand { /** * @ Override * protected void initBootOptions ( ) * super . initBootOptions ( ) ; * addValueOption ( " input " , " FILE " , " input script " ) . tiny ( " i " ) ; */ @ Override public ExitCode doCommandImpl ( ArgsBase args ) // , ConfigBoot resinBoot ) throws CommandLineException { } }
Console console = System . console ( ) ; /* if ( args . env ( ) . isEmbedded ( ) ) { console = null ; */ String fileName = args . getArg ( "input" ) ; if ( fileName != null ) { throw new UnsupportedOperationException ( ) ; /* PathImpl pwd = VfsOld . getPwd ( ) ; PathImpl scriptPath = VfsOld . lookup ( fileName ) ; try ( ReadStream is = scriptPath . openRead ( ) ) { / / XXX : change to _ _ DIR _ _ and _ _ FILE _ _ VfsOld . setPwd ( scriptPath . getParent ( ) ) ; doBatch ( is , args ) ; } catch ( IOException e ) { throw new RuntimeException ( e ) ; } finally { VfsOld . setPwd ( pwd ) ; return ExitCode . OK ; */ } else if ( console != null ) { System . err . println ( L . l ( "{0}\n{1}" , Version . getFullVersion ( ) , Version . getCopyright ( ) ) ) ; System . err . println ( L . l ( "Use 'help' for help and 'exit' to exit the {0} shell." , args . programName ( ) ) ) ; doConsole ( console , args ) ; } else { doBatch ( args ) ; } return ExitCode . OK ;
public class Asm { /** * Create qword ( 8 Bytes ) pointer operand . */ public static final Mem qword_ptr_abs ( long target , Register index , int shift , long disp , SEGMENT segmentPrefix ) { } }
// Thin convenience wrapper: delegates to the generic absolute-address memory-operand
// builder with the operand size fixed to SIZE_QWORD (8 bytes).
return _ptr_build_abs ( target , index , shift , disp , segmentPrefix , SIZE_QWORD ) ;
public class HashtableOnDisk { /** * Write the object pointer to the table . If we are in the process of * doubling , this method finds the correct table to update ( since there are * two tables active during the doubling process ) . */ void updateHtindex ( int index , long value , int tableid ) { } }
if ( tableid == header . currentTableId ( ) ) { htindex [ index ] = value ; } else { new_htindex [ index ] = value ; }
public class Delta { /** * Undo differential coding ( in - place ) . Effectively computes a prefix * sum . Like inverseDelta , only faster . * @ param data * to be modified */ public static void fastinverseDelta ( int [ ] data ) { } }
int sz0 = data . length / 4 * 4 ; int i = 1 ; if ( sz0 >= 4 ) { int a = data [ 0 ] ; for ( ; i < sz0 - 4 ; i += 4 ) { a = data [ i ] += a ; a = data [ i + 1 ] += a ; a = data [ i + 2 ] += a ; a = data [ i + 3 ] += a ; } } for ( ; i != data . length ; ++ i ) { data [ i ] += data [ i - 1 ] ; }
public class Transaction { /** * Returns the result set from the provided query . Holds a pessimistic lock on all returned * documents . * @ return The contents of the Document at this DocumentReference . */ @ Nonnull public ApiFuture < QuerySnapshot > get ( @ Nonnull Query query ) { } }
// Firestore transactions require all reads to happen before any write; the precondition
// enforces that no writes have been queued yet. The query is executed under this
// transaction's id, which is what acquires the pessimistic locks.
Preconditions . checkState ( isEmpty ( ) , READ_BEFORE_WRITE_ERROR_MSG ) ; return query . get ( transactionId ) ;
public class FileHandler { /** * Configures this handler from LogManager properties keyed by this class name : * pattern ( default " % h / java % u . log " ) , limit ( >= 0 , default 0 = unlimited ) , * count ( >= 1 , default 1 ) , append ( default false ) , level , filter , formatter * ( default XMLFormatter ) and encoding ( platform default on failure ) . */ private void configure ( ) { } }
// Negative limit and non-positive count are silently clamped to their defaults.
// A bad encoding falls back to null (platform default); that fallback itself
// should never fail, hence the empty inner catch.
LogManager manager = LogManager . getLogManager ( ) ; String cname = getClass ( ) . getName ( ) ; pattern = manager . getStringProperty ( cname + ".pattern" , "%h/java%u.log" ) ; limit = manager . getIntProperty ( cname + ".limit" , 0 ) ; if ( limit < 0 ) { limit = 0 ; } count = manager . getIntProperty ( cname + ".count" , 1 ) ; if ( count <= 0 ) { count = 1 ; } append = manager . getBooleanProperty ( cname + ".append" , false ) ; setLevel ( manager . getLevelProperty ( cname + ".level" , Level . ALL ) ) ; setFilter ( manager . getFilterProperty ( cname + ".filter" , null ) ) ; setFormatter ( manager . getFormatterProperty ( cname + ".formatter" , new XMLFormatter ( ) ) ) ; try { setEncoding ( manager . getStringProperty ( cname + ".encoding" , null ) ) ; } catch ( Exception ex ) { try { setEncoding ( null ) ; } catch ( Exception ex2 ) { // doing a setEncoding with null should always work . // assert false ; } }
public class CheckMoveHandler { /** * The Field has Changed . * @ param bDisplayOption If true , display the change . * @ param iMoveMode The type of move being done ( init / read / screen ) . * @ return The error code ( or NORMAL _ RETURN if okay ) . */ public int moveIt ( boolean bDisplayOption , int iMoveMode ) { } }
// Destination field is enabled exactly when the owner's (boolean) state is false.
// Only SCREEN_MOVE proceeds; for other move modes this is a no-op. When the owner
// state is false the destination is merely redisplayed, otherwise the actual move
// is delegated to the superclass. NOTE(review): getState() semantics inferred from
// usage as a boolean -- confirm against the owner class.
( ( BaseField ) m_fldDest . getField ( ) ) . setEnabled ( ! this . getOwner ( ) . getState ( ) ) ; if ( iMoveMode != DBConstants . SCREEN_MOVE ) return DBConstants . NORMAL_RETURN ; if ( this . getOwner ( ) . getState ( ) == false ) { if ( bDisplayOption ) ( ( BaseField ) m_fldDest . getField ( ) ) . displayField ( ) ; // Redisplay return DBConstants . NORMAL_RETURN ; } else return super . moveIt ( bDisplayOption , iMoveMode ) ; // Move it !
public class SARLAnnotationUtil { /** * Extract the integer value of the given annotation , if it exists . * @ param op the annotated element . * @ param annotationType the type of the annotation to consider * @ return the value of the annotation , or { @ code null } if no annotation or no * value . * @ since 0.6 */ public Integer findIntValue ( JvmAnnotationTarget op , Class < ? extends Annotation > annotationType ) { } }
final JvmAnnotationReference reference = this . lookup . findAnnotation ( op , annotationType ) ; if ( reference != null ) { return findIntValue ( reference ) ; } return null ;
import java.util.regex.Pattern;

class EliminateExtraSpaces {
    // Compiled once and reused: compiling the regex on every call is pure overhead.
    private static final Pattern WHITESPACE_RUN = Pattern.compile("\\s+");

    /**
     * Replaces every run of whitespace in a string with a single space.
     *
     * Examples:
     * >>> eliminateExtraSpaces("Google    Assistant")
     * "Google Assistant"
     * >>> eliminateExtraSpaces("Quad  Core")
     * "Quad Core"
     * >>> eliminateExtraSpaces("ChromeCast  Built-in")
     * "ChromeCast Built-in"
     *
     * Note: \s+ matches tabs and newlines too, so those collapse to a space as well.
     *
     * @param inputText A string that may contain multiple consecutive whitespace characters.
     * @return A string with each whitespace run replaced by a single space.
     */
    public static String eliminateExtraSpaces(String inputText) {
        return WHITESPACE_RUN.matcher(inputText).replaceAll(" ");
    }
}
public class MetricRegistryImpl { /** * Return the { @ link Counter } registered under this name ; or create and register * a new { @ link Counter } if none is registered . * @ param name the name of the metric * @ return a new or pre - existing { @ link Counter } */ @ Override public Counter counter ( String name ) { } }
// Convenience overload: wraps the bare name in COUNTER-typed Metadata and delegates
// to the Metadata-based counter(...) which does the actual lookup/registration.
return this . counter ( new Metadata ( name , MetricType . COUNTER ) ) ;
public class Ifc2x3tc1PackageImpl { /** * < ! - - begin - user - doc - - > * < ! - - end - user - doc - - > * @ generated */ public EClass getIfcSweptDiskSolid ( ) { } }
// EMF-generated lazy accessor: on first use, fetch the EClass from the globally
// registered package by its fixed classifier index (586). Do not hand-edit the
// index; it is maintained by the code generator.
if ( ifcSweptDiskSolidEClass == null ) { ifcSweptDiskSolidEClass = ( EClass ) EPackage . Registry . INSTANCE . getEPackage ( Ifc2x3tc1Package . eNS_URI ) . getEClassifiers ( ) . get ( 586 ) ; } return ifcSweptDiskSolidEClass ;
public class AWSRAMClient { /** * Gets the policies for the specifies resources . * @ param getResourcePoliciesRequest * @ return Result of the GetResourcePolicies operation returned by the service . * @ throws MalformedArnException * The format of an Amazon Resource Name ( ARN ) is not valid . * @ throws InvalidNextTokenException * The specified value for NextToken is not valid . * @ throws InvalidParameterException * A parameter is not valid . * @ throws ServerInternalException * The service could not respond to the request due to an internal problem . * @ throws ServiceUnavailableException * The service is not available . * @ sample AWSRAM . GetResourcePolicies * @ see < a href = " http : / / docs . aws . amazon . com / goto / WebAPI / ram - 2018-01-04 / GetResourcePolicies " target = " _ top " > AWS API * Documentation < / a > */ @ Override public GetResourcePoliciesResult getResourcePolicies ( GetResourcePoliciesRequest request ) { } }
request = beforeClientExecution ( request ) ; return executeGetResourcePolicies ( request ) ;
public class AmazonWorkLinkClient { /** * Updates fleet metadata , such as DisplayName . * @ param updateFleetMetadataRequest * @ return Result of the UpdateFleetMetadata operation returned by the service . * @ throws UnauthorizedException * You are not authorized to perform this action . * @ throws InternalServerErrorException * The service is temporarily unavailable . * @ throws InvalidRequestException * The request is not valid . * @ throws ResourceNotFoundException * The requested resource was not found . * @ throws TooManyRequestsException * The number of requests exceeds the limit . * @ sample AmazonWorkLink . UpdateFleetMetadata * @ see < a href = " http : / / docs . aws . amazon . com / goto / WebAPI / worklink - 2018-09-25 / UpdateFleetMetadata " target = " _ top " > AWS * API Documentation < / a > */ @ Override public UpdateFleetMetadataResult updateFleetMetadata ( UpdateFleetMetadataRequest request ) { } }
request = beforeClientExecution ( request ) ; return executeUpdateFleetMetadata ( request ) ;
public class PluginService { /** * Returns the set of plugins with the given query parameters . * @ param queryParams The query parameters * @ return The set of plugins */ public Collection < Plugin > list ( List < String > queryParams ) { } }
// Blocking call: issues GET /v2/plugins.json with the supplied query parameters and
// waits on the returned future (.get()) for the deserialized PLUGINS collection.
return HTTP . GET ( "/v2/plugins.json" , null , queryParams , PLUGINS ) . get ( ) ;
public class Graphs { /** * Returns a synchronized ( thread - safe ) { @ link UndirectedGraph } backed by the specified Graph . * It is imperative that the user manually synchronize on the returned graph when iterating over iterable collections : * < pre > * Graph syncGraph = synchronize ( graph ) ; * synchronized ( syncGraph ) { * for ( Vertex v : g . getVertices ( ) ) / / Must be in synchronized block * foo ( v ) * < / pre > * Failure to follow this advice may result in non - deterministic behavior . * The returned { @ link Graph } will be serializable if the specified { @ link Graph } is serializable . * @ param < V > the Graph vertices type * @ param < E > the Graph edges type * @ param graph the input { @ link Graph } * @ return the syncronyzed graph */ public static < V , E > Graph < V , E > synchronize ( final UndirectedGraph < V , E > graph ) { } }
UndirectedGraph < V , E > checkedGraph = checkNotNull ( graph , "Impossible to synchronize null Graph." ) ; return new SynchronizedUndirectedGraph < V , E > ( checkedGraph ) ;
public class PdfContentByte { /** * Implements a link to another document . * @ param filename the filename for the remote document * @ param name the name to jump to * @ param llx the lower left x corner of the activation area * @ param lly the lower left y corner of the activation area * @ param urx the upper right x corner of the activation area * @ param ury the upper right y corner of the activation area */ public void remoteGoto ( String filename , String name , float llx , float lly , float urx , float ury ) { } }
// Pure delegation: the owning PdfDocument creates the actual remote-goto annotation.
pdf . remoteGoto ( filename , name , llx , lly , urx , ury ) ;
public class ObservableHttp { /** * Execute request using { @ link HttpAsyncRequestProducer } to define HTTP Method , URI and payload ( if applicable ) . * If the response is chunked ( or flushed progressively such as with < i > text / event - stream < / i > < a href = " http : / / www . w3 . org / TR / 2009 / WD - eventsource - 20091029 / " > Server - Sent Events < / a > ) this will call * { @ link Observer # onNext } multiple times . * Use { @ code HttpAsyncMethods . create * } factory methods to create { @ link HttpAsyncRequestProducer } instances . * A client can be retrieved like this : * < pre > { @ code CloseableHttpAsyncClient httpclient = HttpAsyncClients . createDefault ( ) ; } < / pre > * A client with custom configurations can be created like this : * < pre > { @ code * final RequestConfig requestConfig = RequestConfig . custom ( ) * . setSocketTimeout ( 3000) * . setConnectTimeout ( 3000 ) . build ( ) ; * final CloseableHttpAsyncClient httpclient = HttpAsyncClients . custom ( ) * . setDefaultRequestConfig ( requestConfig ) * . setMaxConnPerRoute ( 20) * . setMaxConnTotal ( 50) * . build ( ) ; * httpclient . start ( ) ; * } < / pre > * @ param requestProducer * @ param client * @ return the observable HTTP response stream */ public static ObservableHttp < ObservableHttpResponse > createRequest ( final HttpAsyncRequestProducer requestProducer , final HttpAsyncClient client ) { } }
// Convenience overload: delegates with a fresh, empty BasicHttpContext so each
// request gets its own execution context.
return createRequest ( requestProducer , client , new BasicHttpContext ( ) ) ;
// Review notes on the implementation below:
// - Each failure mode (null/blank input, parser returning null, non-object result)
//   records an error on the ValidationContext and returns null instead of throwing.
// - A ValidationException from fromJSON() is also swallowed into a null return;
//   callers are expected to inspect ctx.getErrorCount().
// - NOTE(review): ctx.push("fromString") has no matching pop in this method --
//   presumably fromJSON or the context manages unwinding; confirm.
public class JSONDeserializer { /** * Parses the JSON string and returns the IJSONSerializable . * Use this method if you need to parse JSON that may contain one or more errors . * < pre > * ValidationContext ctx = new ValidationContext ( ) ; * ctx . setValidate ( true ) ; * ctx . setStopOnError ( false ) ; / / find all errors * IJSONSerializable < ? > node = JSONDeserializer . getInstance ( ) . fromString ( jsonString , ctx ) ; * if ( ctx . getErrorCount ( ) > 0) * Console . log ( ctx . getDebugString ( ) ) ; * < / pre > * @ param string JSON string as produced by { @ link IJSONSerializable # toJSONString ( ) } * @ param ctx ValidationContext * @ return IJSONSerializable */ public final IJSONSerializable < ? > fromString ( String string , final ValidationContext ctx ) { } }
try { ctx . push ( "fromString" ) ; if ( null == ( string = StringOps . toTrimOrNull ( string ) ) ) { ctx . addError ( "NULL JSON String" ) ; return null ; } final JSONValue value = JSONParser . parseStrict ( string ) ; if ( null == value ) { ctx . addError ( "NULL from JSONParser" ) ; return null ; } final JSONObject json = value . isObject ( ) ; if ( null == json ) { ctx . addError ( "Result is not a JSONObject" ) ; return null ; } return fromJSON ( json , ctx ) ; } catch ( final ValidationException e ) { return null ; }
public class FileUtils { /** * Loads a list of { @ link Symbol } s from a file , one - per - line , skipping lines starting with " # " as * comments . */ public static ImmutableList < Symbol > loadSymbolList ( final CharSource source ) throws IOException { } }
// Composition of two helpers: loadStringList does the line reading / "#" filtering,
// SymbolUtils.listFrom converts the strings to Symbols.
return SymbolUtils . listFrom ( loadStringList ( source ) ) ;
public class MalisisGui { /** * Display this { @ link MalisisGui } . * @ param cancelClose whether or not to cancel the next Gui close event ( used for when the GUI is opened from command ) */ public void display ( boolean cancelClose ) { } }
// Resolution must be set before construction; if doConstruct() vetoes, nothing is shown.
// Note: cancelClose is stored in a STATIC field (MalisisGui.cancelClose), so it applies
// globally to the next close event, not just to this instance.
setResolution ( ) ; if ( ! doConstruct ( ) ) return ; MalisisGui . cancelClose = cancelClose ; Minecraft . getMinecraft ( ) . displayGuiScreen ( this ) ;
public class AbstractLocalAlluxioCluster { /** * Configures and starts the worker ( s ) . */ public void startWorkers ( ) throws Exception { } }
// Creates mNumWorkers worker processes, starts each on its own named thread
// (InterruptedException during start is the expected shutdown path and is ignored;
// any other failure is logged then rethrown so JUnit does not swallow it silently),
// and finally blocks until every worker reports ready.
mWorkers = new ArrayList < > ( ) ; for ( int i = 0 ; i < mNumWorkers ; i ++ ) { mWorkers . add ( WorkerProcess . Factory . create ( ) ) ; } for ( final WorkerProcess worker : mWorkers ) { Runnable runWorker = ( ) -> { try { worker . start ( ) ; } catch ( InterruptedException e ) { // this is expected } catch ( Exception e ) { // Log the exception as the RuntimeException will be caught and handled silently by // JUnit LOG . error ( "Start worker error" , e ) ; throw new RuntimeException ( e + " \n Start Worker Error \n" + e . getMessage ( ) , e ) ; } } ; Thread thread = new Thread ( runWorker ) ; thread . setName ( "WorkerThread-" + System . identityHashCode ( thread ) ) ; mWorkerThreads . add ( thread ) ; thread . start ( ) ; } for ( WorkerProcess worker : mWorkers ) { TestUtils . waitForReady ( worker ) ; }
public class ValidationResultList { /** * Count all items according to the provided filter . * @ param aFilter * Optional filter to use . May be < code > null < / code > . * @ return The number of errors matching the provided filter . */ @ Nonnegative public int getAllCount ( @ Nullable final Predicate < ? super IError > aFilter ) { } }
int ret = 0 ; for ( final ValidationResult aItem : this ) ret += aItem . getErrorList ( ) . getCount ( aFilter ) ; return ret ;
public class SbeTool { /** * Generate SBE encoding and decoding stubs for a target language . * @ param ir for the parsed specification . * @ param outputDirName directory into which code will be generated . * @ param targetLanguage for the generated code . * @ throws Exception if an error occurs while generating the code . */ public static void generate ( final Ir ir , final String outputDirName , final String targetLanguage ) throws Exception { } }
final TargetCodeGenerator targetCodeGenerator = TargetCodeGeneratorLoader . get ( targetLanguage ) ; final CodeGenerator codeGenerator = targetCodeGenerator . newInstance ( ir , outputDirName ) ; codeGenerator . generate ( ) ;
// Review notes on the implementation below:
// - Security phase (only when enabled on the connection): temporary queues/topics
//   registered on this connection are always allowed; the admin request queue requires
//   REMOTE_ADMIN; the admin reply queue may only be written by the internal admin thread
//   (i.e. a null security context); everything else needs PRODUCE on the destination.
// - Dispatch phase: under the read lock the message is appended to pendingPuts and, for
//   non-transacted sessions, commitUpdates is invoked immediately (see FIXME re async).
public class LocalSession { /** * Called from producers when sending a message * @ param message message to dispatch * @ throws JMSException */ public final void dispatch ( AbstractMessage message ) throws JMSException { } }
// Security LocalConnection conn = ( LocalConnection ) getConnection ( ) ; if ( conn . isSecurityEnabled ( ) ) { Destination destination = message . getJMSDestination ( ) ; if ( destination instanceof Queue ) { String queueName = ( ( Queue ) destination ) . getQueueName ( ) ; if ( conn . isRegisteredTemporaryQueue ( queueName ) ) { // OK , temporary destination } else if ( queueName . equals ( FFMQConstants . ADM_REQUEST_QUEUE ) ) { conn . checkPermission ( Resource . SERVER , Action . REMOTE_ADMIN ) ; } else if ( queueName . equals ( FFMQConstants . ADM_REPLY_QUEUE ) ) { // Only the internal admin thread can produce on this queue if ( conn . getSecurityContext ( ) != null ) throw new FFMQException ( "Access denied to administration queue " + queueName , "ACCESS_DENIED" ) ; } else { // Standard queue conn . checkPermission ( destination , Action . PRODUCE ) ; } } else if ( destination instanceof Topic ) { String topicName = ( ( Topic ) destination ) . getTopicName ( ) ; if ( conn . isRegisteredTemporaryTopic ( topicName ) ) { // OK , temporary destination } else { // Standard topic conn . checkPermission ( destination , Action . PRODUCE ) ; } } else throw new InvalidDestinationException ( "Unsupported destination : " + destination ) ; } if ( debugEnabled ) log . debug ( this + " [PUT] in " + message . getJMSDestination ( ) + " - " + message ) ; externalAccessLock . readLock ( ) . lock ( ) ; try { checkNotClosed ( ) ; pendingPuts . add ( message ) ; if ( ! transacted ) commitUpdates ( false , null , true ) ; // FIXME Async commit ? } finally { externalAccessLock . readLock ( ) . unlock ( ) ; }
public class AWSCodePipelineClient { /** * Represents the success of a job as returned to the pipeline by a job worker . Only used for custom actions . * @ param putJobSuccessResultRequest * Represents the input of a PutJobSuccessResult action . * @ return Result of the PutJobSuccessResult operation returned by the service . * @ throws ValidationException * The validation was specified in an invalid format . * @ throws JobNotFoundException * The specified job was specified in an invalid format or cannot be found . * @ throws InvalidJobStateException * The specified job state was specified in an invalid format . * @ sample AWSCodePipeline . PutJobSuccessResult * @ see < a href = " http : / / docs . aws . amazon . com / goto / WebAPI / codepipeline - 2015-07-09 / PutJobSuccessResult " * target = " _ top " > AWS API Documentation < / a > */ @ Override public PutJobSuccessResultResult putJobSuccessResult ( PutJobSuccessResultRequest request ) { } }
request = beforeClientExecution ( request ) ; return executePutJobSuccessResult ( request ) ;
public class AbstractListPreference { /** * Sets the values , which correspond to the entries of the list , which is shown by the * preference . * @ param entryValues * The values , which should be set , as a { @ link CharSequence } array . The values may not * be null and the array ' s length must be equal to the number of list items */ public final void setEntryValues ( @ NonNull final CharSequence [ ] entryValues ) { } }
// Fail fast on null, then store the array reference as-is (no defensive copy --
// callers must not mutate the array afterwards).
Condition . INSTANCE . ensureNotNull ( entryValues , "The entry values may not be null" ) ; this . entryValues = entryValues ;
// Review notes on the implementation below:
// - Phase 1: build a Pointcut for every aspect rule that declares one, and collect
//   SESSION-joinpoint aspects into a dedicated post-register.
// - Phase 2: pre-register aspect advice against the bean and translet registries.
// - Phase 3 (only when verification is on or debug logging is enabled): count pointcut
//   patterns that matched nothing (bean id, @class, or ^method patterns). Each miss is
//   logged; when pointcut-pattern verification is enabled, any miss ultimately raises
//   InvalidPointcutPatternException.
// - Phase 4: if any session-scope advice was collected, attach its registry to the
//   aspect rule registry.
public class AbstractActivityContextBuilder { /** * Initialize the aspect rule registry . * @ param assistant the context rule assistant */ private void initAspectRuleRegistry ( ContextRuleAssistant assistant ) { } }
AspectRuleRegistry aspectRuleRegistry = assistant . getAspectRuleRegistry ( ) ; BeanRuleRegistry beanRuleRegistry = assistant . getBeanRuleRegistry ( ) ; TransletRuleRegistry transletRuleRegistry = assistant . getTransletRuleRegistry ( ) ; AspectAdviceRulePostRegister sessionScopeAspectAdviceRulePostRegister = new AspectAdviceRulePostRegister ( ) ; for ( AspectRule aspectRule : aspectRuleRegistry . getAspectRules ( ) ) { PointcutRule pointcutRule = aspectRule . getPointcutRule ( ) ; if ( pointcutRule != null ) { Pointcut pointcut = PointcutFactory . createPointcut ( pointcutRule ) ; aspectRule . setPointcut ( pointcut ) ; } if ( aspectRule . getJoinpointTargetType ( ) == JoinpointTargetType . SESSION ) { sessionScopeAspectAdviceRulePostRegister . register ( aspectRule ) ; } } AspectAdviceRulePreRegister preRegister = new AspectAdviceRulePreRegister ( aspectRuleRegistry ) ; preRegister . register ( beanRuleRegistry ) ; preRegister . register ( transletRuleRegistry ) ; // check invalid pointcut pattern boolean pointcutPatternVerifiable = assistant . isPointcutPatternVerifiable ( ) ; if ( pointcutPatternVerifiable || log . isDebugEnabled ( ) ) { int invalidPointcutPatterns = 0 ; for ( AspectRule aspectRule : aspectRuleRegistry . getAspectRules ( ) ) { Pointcut pointcut = aspectRule . getPointcut ( ) ; if ( pointcut != null ) { List < PointcutPatternRule > pointcutPatternRuleList = pointcut . getPointcutPatternRuleList ( ) ; if ( pointcutPatternRuleList != null ) { for ( PointcutPatternRule ppr : pointcutPatternRuleList ) { if ( ppr . getBeanIdPattern ( ) != null && ppr . getMatchedBeanCount ( ) == 0 ) { invalidPointcutPatterns ++ ; String msg = "No beans matching to '" + ppr . getBeanIdPattern ( ) + "'; aspectRule " + aspectRule ; if ( pointcutPatternVerifiable ) { log . error ( msg ) ; } else { log . debug ( msg ) ; } } if ( ppr . getClassNamePattern ( ) != null && ppr . 
getMatchedClassCount ( ) == 0 ) { invalidPointcutPatterns ++ ; String msg = "No beans matching to '@class:" + ppr . getClassNamePattern ( ) + "'; aspectRule " + aspectRule ; if ( pointcutPatternVerifiable ) { log . error ( msg ) ; } else { log . debug ( msg ) ; } } if ( ppr . getMethodNamePattern ( ) != null && ppr . getMatchedMethodCount ( ) == 0 ) { invalidPointcutPatterns ++ ; String msg = "No beans have methods matching to '^" + ppr . getMethodNamePattern ( ) + "'; aspectRule " + aspectRule ; if ( pointcutPatternVerifiable ) { log . error ( msg ) ; } else { log . debug ( msg ) ; } } } } } } if ( invalidPointcutPatterns > 0 ) { String msg = "Invalid pointcut detected: " + invalidPointcutPatterns + "; Please check the logs for more information" ; if ( pointcutPatternVerifiable ) { log . error ( msg ) ; throw new InvalidPointcutPatternException ( msg ) ; } else { log . debug ( msg ) ; } } } AspectAdviceRuleRegistry sessionScopeAarr = sessionScopeAspectAdviceRulePostRegister . getAspectAdviceRuleRegistry ( ) ; if ( sessionScopeAarr != null ) { aspectRuleRegistry . setSessionAspectAdviceRuleRegistry ( sessionScopeAarr ) ; }
// Review notes on the implementation below:
// - Instantiates the configured ResourceAdapter class (null when none configured) and
//   sets every matching <config-property> on it via bean introspection.
// - Property names that are not internal, contain no '.'/'-', and don't end in "Ref"
//   start out on an invalid-name set; each one consumed by a bean property is removed,
//   and leftovers are reported per onError policy (ignore/warn/fail).
// - Values named *PASSWORD* are decoded when encrypted and masked in trace output.
// - String values are converted to the bean property type; on NumberFormatException a
//   String-typed setter overload is tried instead. Numeric values (e.g. ibm:type=
//   "duration" longs) are converted to the declared numeric type; primitives may be
//   stringified when the setter wants String.
// - Setter failures go through ignoreWarnOrFail and, when fatal, are wrapped in
//   InvalidPropertyException with the offending descriptor attached (with FFDC).
// - Finally, when bean validation is available, the instance is validated against the
//   resource adapter's module metadata before being returned.
public class BootstrapContextImpl { /** * Instantiate a new ResourceAdapter and set each < config - property > on it . * @ return configured resource adapter . * @ throws Exception if an error occurs during configuration and onError = FAIL */ @ FFDCIgnore ( value = { } }
NumberFormatException . class , Throwable . class } ) private ResourceAdapter configureResourceAdapter ( ) throws Exception { final boolean trace = TraceComponent . isAnyTracingEnabled ( ) ; String resourceAdapterClassName = ( String ) properties . get ( RESOURCE_ADAPTER_CLASS ) ; if ( resourceAdapterClassName == null ) return null ; ResourceAdapter instance = ( ResourceAdapter ) loadClass ( resourceAdapterClassName ) . newInstance ( ) ; // Assume all configured properties are invalid until we find them Set < String > invalidPropNames = new HashSet < String > ( ) ; for ( Enumeration < String > names = properties . keys ( ) ; names . hasMoreElements ( ) ; ) { String name = names . nextElement ( ) ; if ( ! INTERNAL_PROPS . contains ( name ) && ! Constants . OBJECTCLASS . equals ( name ) && name . indexOf ( '.' ) < 0 && name . indexOf ( '-' ) < 0 && ! name . endsWith ( "Ref" ) ) invalidPropNames . add ( name ) ; } Class < ? > objectClass = instance . getClass ( ) ; for ( PropertyDescriptor descriptor : Introspector . getBeanInfo ( objectClass ) . getPropertyDescriptors ( ) ) { String name = MetatypeGenerator . toCamelCase ( descriptor . getName ( ) ) ; Object value = properties . get ( name ) ; propertyDescriptors . put ( name , descriptor ) ; if ( value != null ) try { invalidPropNames . remove ( name ) ; boolean isProtectedString = value instanceof SerializableProtectedString ; if ( isProtectedString ) value = new String ( ( ( SerializableProtectedString ) value ) . getChars ( ) ) ; if ( value instanceof String && name . toUpperCase ( ) . indexOf ( "PASSWORD" ) >= 0 ) { value = PasswordUtil . getCryptoAlgorithm ( ( String ) value ) == null ? value : PasswordUtil . decode ( ( String ) value ) ; isProtectedString = true ; } if ( trace && tc . isDebugEnabled ( ) ) Tr . debug ( tc , "set " + name + '=' + ( isProtectedString ? "***" : value ) ) ; Class < ? > type = descriptor . getPropertyType ( ) ; Method writeMethod = descriptor . getWriteMethod ( ) ; // Some ra . 
xml files specify everything as String even when that ' s not true . If so , try to convert it : if ( value instanceof String ) { if ( ! type . isAssignableFrom ( value . getClass ( ) ) ) try { value = Utils . convert ( ( String ) value , type ) ; } catch ( NumberFormatException numFormatX ) { // If the property type can ' t be converted to what the bean info wants , // then go looking for a matching method of the proper type ( that isn ' t on the bean info ) . try { writeMethod = objectClass . getMethod ( writeMethod . getName ( ) , String . class ) ; } catch ( NoSuchMethodException x ) { throw numFormatX ; } } } // Allow the metatype to use primitive types instead of String , in which case we can easily convert to String : else if ( String . class . equals ( type ) ) value = value . toString ( ) ; // When ibm : type = " duration " is used , we always get Long . If necessary , convert to another numeric type : else if ( value instanceof Number && ! type . isAssignableFrom ( value . getClass ( ) ) ) value = Utils . convert ( ( Number ) value , type ) ; writeMethod . invoke ( instance , value ) ; } catch ( Throwable x ) { x = x instanceof InvocationTargetException ? x . getCause ( ) : x ; x = Utils . ignoreWarnOrFail ( tc , x , x . getClass ( ) , "J2CA8500.config.prop.error" , name , getConfigElementName ( ) , objectClass . getName ( ) , x ) ; if ( x != null ) { InvalidPropertyException propX = new InvalidPropertyException ( name , x ) ; propX . setInvalidPropertyDescriptors ( new PropertyDescriptor [ ] { descriptor } ) ; FFDCFilter . processException ( propX , getClass ( ) . getName ( ) , "239" ) ; throw propX ; } } } // Invalid properties for ( String name : invalidPropNames ) { InvalidPropertyException x = Utils . ignoreWarnOrFail ( tc , null , InvalidPropertyException . class , "J2CA8501.config.prop.unknown" , name , getConfigElementName ( ) , objectClass . getName ( ) ) ; if ( x != null ) { FFDCFilter . processException ( x , Utils . class . 
getName ( ) , "249" ) ; throw x ; } } if ( bvalHelper != null ) { ResourceAdapterMetaData raMetaData = resourceAdapterSvc . getResourceAdapterMetaData ( ) ; if ( raMetaData != null ) { bvalHelper . validateInstance ( raMetaData . getModuleMetaData ( ) , resourceAdapterSvc . getClassLoader ( ) , instance ) ; } } return instance ;
public class MeasureToViewMap { /** * Returns the subset of the given views that should be exported */ private static Set < View > filterExportedViews ( Collection < View > allViews ) { } }
Set < View > views = Sets . newHashSet ( ) ; for ( View view : allViews ) { if ( view . getWindow ( ) instanceof View . AggregationWindow . Cumulative ) { views . add ( view ) ; } } return Collections . unmodifiableSet ( views ) ;
public class StyleUtils { /** * Applies the given style settings to a run . * @ param run the run to apply the style to ( no - op when null ) * @ param style the style to apply ( no - op when null ) */ public static void styleRun ( XWPFRun run , Style style ) { } }
// Each attribute is applied only when set: color/font-family when non-blank, font size
// when non-zero, bold/italic/strike when non-null, underline only when explicitly TRUE.
// Setting the font family also writes all four CTFonts slots (ascii/hAnsi/cs/eastAsia)
// so East-Asian text uses the same font.
if ( null == run || null == style ) return ; String color = style . getColor ( ) ; String fontFamily = style . getFontFamily ( ) ; int fontSize = style . getFontSize ( ) ; Boolean bold = style . isBold ( ) ; Boolean italic = style . isItalic ( ) ; Boolean strike = style . isStrike ( ) ; Boolean underLine = style . isUnderLine ( ) ; if ( StringUtils . isNotBlank ( color ) ) run . setColor ( color ) ; if ( 0 != fontSize ) run . setFontSize ( fontSize ) ; if ( StringUtils . isNotBlank ( fontFamily ) ) { run . setFontFamily ( fontFamily ) ; CTRPr pr = run . getCTR ( ) . isSetRPr ( ) ? run . getCTR ( ) . getRPr ( ) : run . getCTR ( ) . addNewRPr ( ) ; CTFonts fonts = pr . isSetRFonts ( ) ? pr . getRFonts ( ) : pr . addNewRFonts ( ) ; fonts . setAscii ( fontFamily ) ; fonts . setHAnsi ( fontFamily ) ; fonts . setCs ( fontFamily ) ; fonts . setEastAsia ( fontFamily ) ; } if ( null != bold ) run . setBold ( bold ) ; if ( null != italic ) run . setItalic ( italic ) ; if ( null != strike ) run . setStrikeThrough ( strike ) ; if ( Boolean . TRUE . equals ( underLine ) ) { run . setUnderline ( UnderlinePatterns . SINGLE ) ; }
public class CmsLocaleManager { /** * Clears the caches in the locale manager . < p > */ private void clearCaches ( ) { } }
// flush all caches OpenCms . getMemoryMonitor ( ) . flushCache ( CmsMemoryMonitor . CacheType . LOCALE ) ; CmsResourceBundleLoader . flushBundleCache ( ) ; if ( LOG . isDebugEnabled ( ) ) { LOG . debug ( Messages . get ( ) . getBundle ( ) . key ( Messages . LOG_LOCALE_MANAGER_FLUSH_CACHE_1 , "EVENT_CLEAR_CACHES" ) ) ; }
public class SnapshotUtil { /** * Write the hashinator config file for a snapshot * @ param instId instance ID * @ param path path to which snapshot files will be written * @ param nonce nonce used to distinguish this snapshot * @ param hostId host ID where this is happening * @ param hashData serialized hash configuration data * @ return Runnable object for asynchronous write flushing * @ throws IOException */ public static Runnable writeHashinatorConfig ( InstanceId instId , String path , String nonce , int hostId , HashinatorSnapshotData hashData , boolean isTruncationSnapshot ) throws IOException { } }
final File file = new VoltFile ( path , constructHashinatorConfigFilenameForNonce ( nonce , hostId ) ) ; if ( file . exists ( ) ) { if ( ! file . delete ( ) ) { if ( isTruncationSnapshot ) { VoltDB . crashLocalVoltDB ( "Unexpected exception while attempting to delete old hash file for truncation snapshot" ) ; } throw new IOException ( "Unable to replace existing hashinator config " + file ) ; } } boolean success = false ; try { final FileOutputStream fos = new FileOutputStream ( file ) ; ByteBuffer fileBuffer = hashData . saveToBuffer ( instId ) ; fos . getChannel ( ) . write ( fileBuffer ) ; success = true ; return new Runnable ( ) { @ Override public void run ( ) { try { fos . getChannel ( ) . force ( true ) ; } catch ( IOException e ) { if ( isTruncationSnapshot ) { VoltDB . crashLocalVoltDB ( "Unexpected exception while attempting to create hash file for truncation snapshot" , true , e ) ; } throw new RuntimeException ( e ) ; } finally { try { fos . close ( ) ; } catch ( IOException e ) { if ( isTruncationSnapshot ) { VoltDB . crashLocalVoltDB ( "Unexpected exception while attempting to create hash file for truncation snapshot" , true , e ) ; } throw new RuntimeException ( e ) ; } } } } ; } finally { if ( ! success ) { file . delete ( ) ; } }
public class EnvironmentPropertiesMarshaller {
    /**
     * Marshall the given parameter object.
     *
     * NOTE(review): this follows the AWS SDK generated-marshaller pattern;
     * avoid hand-editing the logic — confirm whether this file is regenerated.
     *
     * @param environmentProperties the object to marshall; must not be null
     * @param protocolMarshaller the target protocol marshaller
     * @throws SdkClientException if the argument is null or marshalling fails
     */
    public void marshall(EnvironmentProperties environmentProperties, ProtocolMarshaller protocolMarshaller) {
        if (environmentProperties == null) {
            throw new SdkClientException("Invalid argument passed to marshall(...)");
        }
        try {
            // Only the property-groups member is bound for this shape.
            protocolMarshaller.marshall(environmentProperties.getPropertyGroups(), PROPERTYGROUPS_BINDING);
        } catch (Exception e) {
            // Wrap any marshalling failure in the SDK's client exception type.
            throw new SdkClientException("Unable to marshall request to JSON: " + e.getMessage(), e);
        }
    }
}
public class LongList { /** * This is equivalent to : * < pre > * < code > * if ( isEmpty ( ) ) { * return OptionalLong . empty ( ) ; * long result = elementData [ 0 ] ; * for ( int i = 1 ; i < size ; i + + ) { * result = accumulator . applyAsLong ( result , elementData [ i ] ) ; * return OptionalLong . of ( result ) ; * < / code > * < / pre > * @ param accumulator * @ return */ public < E extends Exception > OptionalLong reduce ( final Try . LongBinaryOperator < E > accumulator ) throws E { } }
if ( isEmpty ( ) ) { return OptionalLong . empty ( ) ; } long result = elementData [ 0 ] ; for ( int i = 1 ; i < size ; i ++ ) { result = accumulator . applyAsLong ( result , elementData [ i ] ) ; } return OptionalLong . of ( result ) ;
public class EncodingSchemeIDImpl {
    /**
     * Reflective EMF setter: dispatches on the feature id and delegates any
     * unknown feature to the superclass. Generated code — do not hand-edit.
     * <!-- begin-user-doc -->
     * <!-- end-user-doc -->
     * @generated
     */
    @Override
    public void eSet(int featureID, Object newValue) {
        switch (featureID) {
            case AfplibPackage.ENCODING_SCHEME_ID__ESID_CP:
                setESidCP((Integer) newValue);
                return;
            case AfplibPackage.ENCODING_SCHEME_ID__ESID_UD:
                setESidUD((Integer) newValue);
                return;
        }
        // Features not owned by this class are handled by the generated superclass.
        super.eSet(featureID, newValue);
    }
}
public class XMLRoadUtil { /** * Read a road from the XML description . * @ param element is the XML node to read . * @ param pathBuilder is the tool to make paths absolute . * @ param resources is the tool that permits to gather the resources . * @ return the road . * @ throws IOException in case of error . */ public static RoadSegment readRoadSegment ( Element element , PathBuilder pathBuilder , XMLResources resources ) throws IOException { } }
return readRoadPolyline ( element , pathBuilder , resources ) ;
public class MultipartFormData { /** * 加入普通参数 * @ param name 参数名 * @ param value 参数值 */ private void putParameter ( String name , String value ) { } }
String [ ] params = requestParameters . get ( name ) ; params = params == null ? new String [ ] { value } : ArrayUtil . append ( params , value ) ; requestParameters . put ( name , params ) ;
public class Basic2DMatrix { /** * Creates a random { @ link Basic2DMatrix } of the given shape : * { @ code rows } x { @ code columns } . */ public static Basic2DMatrix random ( int rows , int columns , Random random ) { } }
double [ ] [ ] array = new double [ rows ] [ columns ] ; for ( int i = 0 ; i < rows ; i ++ ) { for ( int j = 0 ; j < columns ; j ++ ) { array [ i ] [ j ] = random . nextDouble ( ) ; } } return new Basic2DMatrix ( array ) ;
public class Dim { /** * Evaluates the given script . */ public void evalScript ( final String url , final String text ) { } }
DimIProxy action = new DimIProxy ( this , IPROXY_EVAL_SCRIPT ) ; action . url = url ; action . text = text ; action . withContext ( ) ;
public class JournalKelpImpl {
    /**
     * Writes the put to the journal.
     *
     * Retries the write as long as complete() reports the item was not
     * fully written.
     *
     * @param cursor row data to append to the journal
     */
    void put(RowCursor cursor) {
        boolean isValid;
        do {
            isValid = true;
            try (JournalOutputStream os = openItem()) {
                os.write(CODE_PUT);
                cursor.writeJournal(os);
                // complete() reports whether the item was fully committed;
                // a false result triggers another attempt.
                isValid = os.complete();
            } catch (IOException e) {
                // NOTE(review): isValid is still true when an exception is
                // thrown before complete() runs, so an IOException ends the
                // loop WITHOUT a successful write — confirm this best-effort
                // behavior is intended and not a missed retry.
                log.log(Level.FINER, e.toString(), e);
            }
        } while (!isValid);
    }
}
public class AbstractHostPartitionConnectionPool {
    /**
     * Add host to the system. May need to rebuild the partition map of the system.
     *
     * @param host host to add, carrying its current token ranges
     * @param refresh when true, register the new pool with the topology and
     *        rebuild the partition map
     * @return true if the host was added or its token ranges were updated;
     *         false if the ranges are unchanged or another thread added the
     *         host concurrently
     */
    @Override
    public final synchronized boolean addHost(Host host, boolean refresh) {
        // Already exists
        if (hosts.containsKey(host)) {
            // Check to see if we are adding token ranges or if the token ranges changed
            // which will force a rebuild of the token topology
            Host existingHost = hosts.get(host).getHost();
            // Different range count: adopt the new ranges and report a change.
            if (existingHost.getTokenRanges().size() != host.getTokenRanges().size()) {
                existingHost.setTokenRanges(host.getTokenRanges());
                return true;
            }
            // Same count: compare ranges pairwise after sorting by start token.
            ArrayList<TokenRange> currentTokens = Lists.newArrayList(existingHost.getTokenRanges());
            ArrayList<TokenRange> newTokens = Lists.newArrayList(host.getTokenRanges());
            Collections.sort(currentTokens, compareByStartToken);
            Collections.sort(newTokens, compareByStartToken);
            for (int i = 0; i < currentTokens.size(); i++) {
                // Any mismatch means the ranges differ; nothing is updated here.
                if (!currentTokens.get(i).getStartToken().equals(newTokens.get(i).getStartToken())
                        || !currentTokens.get(i).getEndToken().equals(newTokens.get(i).getEndToken())) {
                    return false;
                }
            }
            // Ranges are pairwise equal; re-set them anyway and report a change.
            existingHost.setTokenRanges(host.getTokenRanges());
            return true;
        } else {
            HostConnectionPool<CL> pool = newHostConnectionPool(host, factory, config);
            // putIfAbsent guards against a concurrent add of the same host.
            if (null == hosts.putIfAbsent(host, pool)) {
                try {
                    monitor.onHostAdded(host, pool);
                    if (refresh) {
                        topology.addPool(pool);
                        rebuildPartitions();
                    }
                    pool.primeConnections(config.getInitConnsPerHost());
                } catch (Exception e) {
                    // Ignore, pool will have been marked down internally
                }
                return true;
            } else {
                // Lost the race to another thread; that thread's pool wins.
                return false;
            }
        }
    }
}
public class AfplibPackageImpl {
    /**
     * Lazily resolves and caches the PGD EClass from the registered package.
     * Generated code — do not hand-edit.
     * <!-- begin-user-doc -->
     * <!-- end-user-doc -->
     * @generated
     */
    public EClass getPGD() {
        if (pgdEClass == null) {
            // Classifier index 312 is fixed by the generated package metadata.
            pgdEClass = (EClass) EPackage.Registry.INSTANCE.getEPackage(AfplibPackage.eNS_URI).getEClassifiers().get(312);
        }
        return pgdEClass;
    }
}