signature
stringlengths
43
39.1k
implementation
stringlengths
0
450k
public class GetRelationalDatabaseEventsResult { /** * An object describing the result of your get relational database events request . * < b > NOTE : < / b > This method appends the values to the existing list ( if any ) . Use * { @ link # setRelationalDatabaseEvents ( java . util . Collection ) } or * { @ link # withRelationalDatabaseEvents ( java . util . Collection ) } if you want to override the existing values . * @ param relationalDatabaseEvents * An object describing the result of your get relational database events request . * @ return Returns a reference to this object so that method calls can be chained together . */ public GetRelationalDatabaseEventsResult withRelationalDatabaseEvents ( RelationalDatabaseEvent ... relationalDatabaseEvents ) { } }
if ( this . relationalDatabaseEvents == null ) { setRelationalDatabaseEvents ( new java . util . ArrayList < RelationalDatabaseEvent > ( relationalDatabaseEvents . length ) ) ; } for ( RelationalDatabaseEvent ele : relationalDatabaseEvents ) { this . relationalDatabaseEvents . add ( ele ) ; } return this ;
public class JCudaDriver { /** * A wrapper function for * { @ link JCudaDriver # cuModuleLoadDataEx ( CUmodule , Pointer , int , int [ ] , Pointer ) } * which allows passing in the image data as a string . * @ param module Returned module * @ param image Module data to load * @ param numOptions Number of options * @ param options Options for JIT * @ param optionValues Option values for JIT * @ return The return code from < code > cuModuleLoadDataEx < / code > * @ see # cuModuleLoadDataEx ( CUmodule , Pointer , int , int [ ] , Pointer ) */ public static int cuModuleLoadDataEx ( CUmodule phMod , String string , int numOptions , int options [ ] , Pointer optionValues ) { } }
byte bytes [ ] = string . getBytes ( ) ; byte image [ ] = Arrays . copyOf ( bytes , bytes . length + 1 ) ; return cuModuleLoadDataEx ( phMod , Pointer . to ( image ) , numOptions , options , optionValues ) ;
public class XmlEscape { /** * Perform an XML < strong > unescape < / strong > operation on a < tt > char [ ] < / tt > input . * No additional configuration arguments are required . Unescape operations * will always perform < em > complete < / em > XML 1.0/1.1 unescape of CERs , decimal * and hexadecimal references . * This method is < strong > thread - safe < / strong > . * @ param text the < tt > char [ ] < / tt > to be unescaped . * @ param offset the position in < tt > text < / tt > at which the unescape operation should start . * @ param len the number of characters in < tt > text < / tt > that should be unescaped . * @ param writer the < tt > java . io . Writer < / tt > to which the unescaped result will be written . Nothing will * be written at all to this writer if input is < tt > null < / tt > . * @ throws IOException if an input / output exception occurs */ public static void unescapeXml ( final char [ ] text , final int offset , final int len , final Writer writer ) throws IOException { } }
if ( writer == null ) { throw new IllegalArgumentException ( "Argument 'writer' cannot be null" ) ; } final int textLen = ( text == null ? 0 : text . length ) ; if ( offset < 0 || offset > textLen ) { throw new IllegalArgumentException ( "Invalid (offset, len). offset=" + offset + ", len=" + len + ", text.length=" + textLen ) ; } if ( len < 0 || ( offset + len ) > textLen ) { throw new IllegalArgumentException ( "Invalid (offset, len). offset=" + offset + ", len=" + len + ", text.length=" + textLen ) ; } // The chosen symbols ( 1.0 or 1.1 ) don ' t really matter , as both contain the same CERs XmlEscapeUtil . unescape ( text , offset , len , writer , XmlEscapeSymbols . XML11_SYMBOLS ) ;
public class PrefixedCollapsibleMap { /** * Flatten a Map of String , Object into a Map of String , String where keys are ' . ' separated * and prepends a key . * @ param map map to transform * @ param prefix key to prepend * @ return flattened map */ public static Map < String , String > serialize ( Map < String , Object > map , String prefix ) { } }
if ( map == null || map . isEmpty ( ) ) { return Collections . emptyMap ( ) ; } Map < String , String > flattened = flatten ( map , new HashMap < String , String > ( ) , new ArrayList < String > ( ) ) ; Map < String , String > result = new HashMap < > ( ) ; for ( Map . Entry < String , String > entry : flattened . entrySet ( ) ) { result . put ( prefix + "." + entry . getKey ( ) , entry . getValue ( ) ) ; } return result ;
public class TypeAutoCast { /** * 从ResultSet中读出数据并转成成对应的类型 , 如果指定类型rs无法转换 , 则不转换 。 * 2018年4月24日 11:48:32 新增支持标记为isJSON的列的处理 。 * @ param rs * @ param columnName * @ return */ public static Object getFromRS ( ResultSet rs , String columnName , Field field ) throws SQLException { } }
if ( rs . getObject ( columnName ) == null ) { // 保证null会返回null值 return null ; } Column column = field . getAnnotation ( Column . class ) ; if ( column != null && column . isJSON ( ) ) { // 优先处理标记为json的列 String valStr = rs . getString ( columnName ) ; if ( valStr == null || valStr . trim ( ) . isEmpty ( ) ) { return null ; } String typeName = field . getGenericType ( ) . toString ( ) ; try { if ( ! typeName . contains ( "<" ) ) { return OBJECT_MAPPER . readValue ( valStr , field . getType ( ) ) ; } else { // 处理泛型 JavaType type = parseGenericType ( OBJECT_MAPPER . getTypeFactory ( ) , typeName ) ; return OBJECT_MAPPER . readValue ( valStr , type ) ; } } catch ( Exception e ) { LOGGER . error ( "parse column to JSON fail, json:{}, type:{}" , valStr , typeName , e ) ; return valStr ; // 作为string返回 , 交由上一级处理 } } Class < ? > clazz = field . getType ( ) ; if ( clazz == Integer . class || clazz == int . class ) { return rs . getInt ( columnName ) ; } if ( clazz == Long . class || clazz == long . class ) { return rs . getLong ( columnName ) ; } if ( clazz == Byte . class || clazz == byte . class ) { return rs . getByte ( columnName ) ; } if ( clazz == byte [ ] . class ) { return rs . getBytes ( columnName ) ; } if ( clazz == Short . class || clazz == short . class ) { return rs . getShort ( columnName ) ; } if ( clazz == Boolean . class || clazz == boolean . class ) { return rs . getBoolean ( columnName ) ; } if ( clazz == Float . class || clazz == float . class ) { return rs . getFloat ( columnName ) ; } if ( clazz == Double . class || clazz == double . class ) { return rs . getDouble ( columnName ) ; } if ( clazz == String . class ) { return rs . getString ( columnName ) ; } if ( clazz == BigDecimal . class ) { return rs . getBigDecimal ( columnName ) ; } if ( clazz == java . sql . Date . class ) { return rs . getDate ( columnName ) ; } if ( clazz == java . sql . Time . class ) { return rs . getDate ( columnName ) ; } if ( clazz == java . sql . Timestamp . 
class ) { return rs . getTimestamp ( columnName ) ; } return rs . getObject ( columnName ) ;
public class PathMap {

    /**
     * Return the portion of a path that is after a path spec.
     *
     * @param pathSpec the path specification, e.g. "/foo/*"
     * @param path the request path to match against the spec
     * @return The path info string, or null when the spec yields no suffix
     */
    public static String pathInfo(String pathSpec, String path) {
        // Only specs starting with '/' can produce path info.
        if (pathSpec.charAt(0) != '/') {
            return null;
        }
        // The root spec and an exact match carry no extra path info.
        if (pathSpec.length() == 1 || pathSpec.equals(path)) {
            return null;
        }
        // Prefix specs like "/foo/*": the info is whatever follows the prefix.
        if (pathSpec.endsWith("/*")) {
            int prefixLen = pathSpec.length() - 2;
            if (pathSpec.regionMatches(0, path, 0, prefixLen)) {
                return path.length() == prefixLen ? null : path.substring(prefixLen);
            }
        }
        return null;
    }
}
public class PublicKeyWriter { /** * Write the given { @ link PublicKey } into the given { @ link File } . * @ param publicKey * the public key * @ param file * the file to write in * @ throws IOException * Signals that an I / O exception has occurred . */ public static void write ( final PublicKey publicKey , final @ NonNull File file ) throws IOException { } }
write ( publicKey , new FileOutputStream ( file ) ) ;
public class FinderColumn {

    /**
     * Central point for security context changes.
     * This will be called when:
     * a) the widget is attached (default)
     * b) the security context changes (i.e. scoped roles)
     *
     * @param securityContext the security context to apply
     * @param update whether this is an update to an already-applied context
     */
    private void applySecurity(final SecurityContext securityContext, boolean update) {
        // System.out.println("<<Process SecurityContext on column " + title + ": " + securityContext + ">>");
        // calculate accessible menu items
        filterNonPrivilegeOperations(securityContext, accessibleTopMenuItems, topMenuItems);
        filterNonPrivilegeOperations(securityContext, accessibleMenuItems, menuItems);
        // the top menu is build here
        if (!plain)
            buildTopMenu(headerMenu);
        // the row level menu is build when the celltable is filled
        // hence we need to refresh it
        toggleRowLevelTools(() -> true); // hide it
        // NOTE(review): double-deferred scheduling — presumably so the data refresh
        // completes before the row-level tool visibility is recomputed from the
        // selection; confirm this ordering is required before changing it.
        Scheduler.get().scheduleDeferred(new Scheduler.ScheduledCommand() {
            @Override
            public void execute() {
                dataProvider.refresh();
                Scheduler.get().scheduleDeferred(new Scheduler.ScheduledCommand() {
                    @Override
                    public void execute() {
                        toggleRowLevelTools(() -> selectionModel.getSelectedObject() == null); // show when selected
                    }
                });
            }
        });
    }
}
public class BreakpointStoreOnCache { /** * info maybe turn to equal to another one after get filename from response . */ @ Override public BreakpointInfo findAnotherInfoFromCompare ( @ NonNull DownloadTask task , @ NonNull BreakpointInfo ignored ) { } }
final SparseArray < BreakpointInfo > clonedMap ; synchronized ( this ) { clonedMap = storedInfos . clone ( ) ; } final int size = clonedMap . size ( ) ; for ( int i = 0 ; i < size ; i ++ ) { final BreakpointInfo info = clonedMap . valueAt ( i ) ; if ( info == ignored ) continue ; if ( info . isSameFrom ( task ) ) { return info ; } } return null ;
public class BoxRequestItemUpdate {

    /**
     * Sets the new shared link for the item.
     *
     * @param sharedLink new shared link for the item.
     * @return request with the updated shared link.
     */
    public R setSharedLink(BoxSharedLink sharedLink) {
        // Stage the link in the request body; the actual update happens when the
        // request is sent.
        mBodyMap.put(BoxItem.FIELD_SHARED_LINK, sharedLink);
        return (R) this; // unchecked: R is the concrete request subtype
    }
}
public class DecisionTree { /** * Returns the impurity of a node . * @ param count the sample count in each class . * @ param n the number of samples in the node . * @ return the impurity of a node */ private double impurity ( int [ ] count , int n ) { } }
double impurity = 0.0 ; switch ( rule ) { case GINI : impurity = 1.0 ; for ( int i = 0 ; i < count . length ; i ++ ) { if ( count [ i ] > 0 ) { double p = ( double ) count [ i ] / n ; impurity -= p * p ; } } break ; case ENTROPY : for ( int i = 0 ; i < count . length ; i ++ ) { if ( count [ i ] > 0 ) { double p = ( double ) count [ i ] / n ; impurity -= p * Math . log2 ( p ) ; } } break ; case CLASSIFICATION_ERROR : impurity = 0 ; for ( int i = 0 ; i < count . length ; i ++ ) { if ( count [ i ] > 0 ) { impurity = Math . max ( impurity , count [ i ] / ( double ) n ) ; } } impurity = Math . abs ( 1 - impurity ) ; break ; } return impurity ;
public class ContextUri { /** * Returns a map of all query arguments . * @ return A map of all query arguments . */ public Map < String , String > getQuery ( ) { } }
String query = uri . getQuery ( ) ; String [ ] pairs = query . split ( "&" ) ; Map < String , String > args = new HashMap < > ( ) ; for ( String pair : pairs ) { int idx = pair . indexOf ( "=" ) ; try { args . put ( URLDecoder . decode ( pair . substring ( 0 , idx ) , "UTF-8" ) , URLDecoder . decode ( pair . substring ( idx + 1 ) , "UTF-8" ) ) ; } catch ( UnsupportedEncodingException e ) { throw new IllegalArgumentException ( e ) ; } } return args ;
public class Input { /** * Fills the buffer with more bytes . The default implementation reads from the { @ link # getInputStream ( ) InputStream } , if set . * Can be overridden to fill the bytes from another source . * @ return - 1 if there are no more bytes . */ protected int fill ( byte [ ] buffer , int offset , int count ) throws KryoException { } }
if ( inputStream == null ) return - 1 ; try { return inputStream . read ( buffer , offset , count ) ; } catch ( IOException ex ) { throw new KryoException ( ex ) ; }
public class BatchingContextImpl {

    /**
     * In the OM implementation this method is used to flag the batching
     * context so that upon the next call to executeBatch the OM transaction
     * being used is committed.
     *
     * @param xid the persistent transaction id being marked as committed
     */
    public void updateXIDToCommitted(PersistentTranId xid) {
        if (TraceComponent.isAnyTracingEnabled() && tc.isEntryEnabled())
            SibTr.entry(tc, "updateXIDToCommitted", "XID=" + xid);
        if (_deferredException == null) {
            // We are committing a transaction. The transaction can
            // be either one-phase or two-phase so we need to check
            // our state and then update it so that commit is called
            // at executeBatch() time.
            if (_state == STATE_ACTIVE || _state == STATE_PREPARED) {
                _state = STATE_COMMITTING;
            } else {
                // Any other state is a protocol violation; the failure is deferred
                // rather than thrown, so it surfaces on the next batch operation.
                _deferredException = new PersistenceException("Cannot COMMIT batch as it not in the correct state! State=" + _stateToString[_state]);
            }
        } else {
            // An earlier failure poisons the whole batch; skip the state change.
            if (TraceComponent.isAnyTracingEnabled() && tc.isDebugEnabled())
                SibTr.debug(tc, "No work attempted as an exception has already been thrown during this batch!");
        }
        if (TraceComponent.isAnyTracingEnabled() && tc.isEntryEnabled())
            SibTr.exit(tc, "updateXIDToCommitted");
    }
}
public class FPGrowth { /** * Count the support of each 1 - item . * @ param relation Data * @ param dim Maximum dimensionality * @ return Item counts */ private int [ ] countItemSupport ( final Relation < BitVector > relation , final int dim ) { } }
final int [ ] counts = new int [ dim ] ; FiniteProgress prog = LOG . isVerbose ( ) ? new FiniteProgress ( "Finding frequent 1-items" , relation . size ( ) , LOG ) : null ; for ( DBIDIter iditer = relation . iterDBIDs ( ) ; iditer . valid ( ) ; iditer . advance ( ) ) { SparseFeatureVector < ? > bv = relation . get ( iditer ) ; // TODO : only count those which satisfy minlength ? for ( int it = bv . iter ( ) ; bv . iterValid ( it ) ; it = bv . iterAdvance ( it ) ) { counts [ bv . iterDim ( it ) ] ++ ; } LOG . incrementProcessed ( prog ) ; } LOG . ensureCompleted ( prog ) ; return counts ;
public class ControllerPlugin {

    /**
     * Returns function that takes {@link Result} and {@link ActionContext} as parameter
     * and returns a {@link Result}. This function is called after ActFramework's processing
     * has finished.
     * The afterHandler allows plugin to inject logic to further process the returned result
     * or do some updates to {@code ActionContext}, for example to change the response content
     * type etc.
     * The afterHandler shall always return a result; even if there is nothing to do with
     * it, it must return the result passed in.
     *
     * @param controllerClass the controller class
     * @param actionMethod the action method
     * @return a logic to be injected after ActFramework handles the request
     */
    public $.Func2<Result, ActionContext, Result> afterHandler(Class<?> controllerClass, Method actionMethod) {
        // Default: a pass-through handler that returns the result unchanged;
        // presumably overridden by plugins that need real post-processing.
        return DUMB_AFTER_HANDLER;
    }
}
public class OkRequest {

    /**
     * Write the name/value pair as form data to the request body.
     * The pair specified will be URL-encoded in UTF-8 and sent with the
     * 'application/x-www-form-urlencoded' content-type.
     *
     * @param name the form field name
     * @param value the form field value
     * @return this request
     */
    public OkRequest<T> form(final String name, final String value) {
        // Delegate to the charset-aware overload, fixing UTF-8 as the encoding.
        return form(name, value, CHARSET_UTF8);
    }
}
public class ModeShapeRepositoryFactoryBean {

    /**
     * Generate a JCR repository from the given configuration.
     * Runs once after construction; any failure is wrapped in an unchecked
     * RepositoryRuntimeException so Spring initialization aborts.
     */
    @PostConstruct
    public void buildRepository() {
        try {
            LOGGER.info("Using repo config (classpath): {}", repositoryConfiguration.getURL());
            getPropertiesLoader().loadSystemProperties();
            final RepositoryConfiguration config = RepositoryConfiguration.read(repositoryConfiguration.getURL());
            repository = modeShapeEngine.deploy(config);
            // next line ensures that repository starts before the factory is used.
            final org.modeshape.common.collection.Problems problems = repository.getStartupProblems();
            for (final org.modeshape.common.collection.Problem p : problems) {
                // Startup problems are logged but do not abort the deployment.
                LOGGER.error("ModeShape Start Problem: {}", p.getMessageString());
                // TODO determine problems that should be runtime errors
            }
        } catch (final Exception e) {
            throw new RepositoryRuntimeException(e);
        }
    }
}
public class TracksAdapter { @ Override public void dismiss ( int i ) { } }
if ( mAdapterListener != null ) { if ( mHeaderView != null ) { i -- ; } mTracks . remove ( i ) ; mAdapterListener . onTrackDismissed ( i ) ; }
public class SimpleQuery {

    /**
     * Execute the provided query and returns the result as a List of java objects.
     *
     * @return the list of resulting java entities
     */
    public <T> List<T> asList() {
        String cacheKey = null;
        // is the result of the query cached?
        // The cache is only consulted outside of a transaction.
        if (isCacheable() && transaction == null) {
            cacheKey = getCacheKey();
            List<Key> keys = getCacheManager().get(cacheNamespace, cacheKey);
            if (keys != null) {
                if (isKeysOnly()) {
                    // Keys-only queries can return the cached keys directly.
                    return (List) keys;
                } else {
                    // Resolve the cached keys to entities, preserving key order.
                    final Map<Key, T> values = entityManager.get(keys);
                    return Lists.transform(keys, new Function<Key, T>() {
                        @Override
                        public T apply(Key key) {
                            return values.get(key);
                        }
                    });
                }
            }
        }
        // execute the query
        List<T> result = Lists.newArrayList((Iterable<T>) asIterable());
        // NOTE(review): the cache is repopulated even when transaction != null,
        // while the lookup above is skipped inside a transaction (and cacheKey is
        // then null) — confirm this asymmetry is intended.
        if (isCacheable()) {
            Collection<Key> keys = isKeysOnly() ? result : Collections2.transform(result, new EntityToKeyFunction(classMetadata.getPersistentClass()));
            populateCache(cacheKey, Lists.newArrayList(keys));
        }
        return result;
    }
}
public class ListDomainsResult {

    /**
     * A list of domain names that match the expression.
     *
     * @return A list of domain names that match the expression; never null.
     */
    public java.util.List<String> getDomainNames() {
        // Lazily initialize so callers never see null and can append directly.
        if (domainNames == null) {
            domainNames = new com.amazonaws.internal.SdkInternalList<String>();
        }
        return domainNames;
    }
}
public class Command { /** * copy arguments to treat as commands */ private void copyArgumentsToCommands ( ) { } }
Iterator < Command > i = arguments . iterator ( ) ; while ( i . hasNext ( ) ) { Command cmd = i . next ( ) ; cmd . status = Command . CHILDREN_FILTERED ; operations . add ( cmd ) ; } arguments . clear ( ) ;
public class JavaParser {

    /**
     * ANTLR-generated rule (do not hand-edit):
     * src/main/resources/org/drools/compiler/semantics/java/parser/Java.g:561:1:
     * qualifiedNameList : qualifiedName ( ',' qualifiedName )* ;
     */
    public final void qualifiedNameList() throws RecognitionException {
        int qualifiedNameList_StartIndex = input.index();
        try {
            // Memoization: while backtracking, skip the rule if it was already
            // attempted at this position.
            if (state.backtracking > 0 && alreadyParsedRule(input, 55)) {
                return;
            }
            // src/main/resources/org/drools/compiler/semantics/java/parser/Java.g:562:5: ( qualifiedName ( ',' qualifiedName )* )
            // src/main/resources/org/drools/compiler/semantics/java/parser/Java.g:562:7: qualifiedName ( ',' qualifiedName )*
            {
                pushFollow(FOLLOW_qualifiedName_in_qualifiedNameList2001);
                qualifiedName();
                state._fsp--;
                if (state.failed) return;
                // src/main/resources/org/drools/compiler/semantics/java/parser/Java.g:562:21: ( ',' qualifiedName )*
                loop77: while (true) {
                    int alt77 = 2;
                    int LA77_0 = input.LA(1);
                    // token type 43 is ',' per the grammar alternative below
                    if ((LA77_0 == 43)) {
                        alt77 = 1;
                    }
                    switch (alt77) {
                        case 1:
                            // src/main/resources/org/drools/compiler/semantics/java/parser/Java.g:562:22: ',' qualifiedName
                            {
                                match(input, 43, FOLLOW_43_in_qualifiedNameList2004);
                                if (state.failed) return;
                                pushFollow(FOLLOW_qualifiedName_in_qualifiedNameList2006);
                                qualifiedName();
                                state._fsp--;
                                if (state.failed) return;
                            }
                            break;
                        default:
                            break loop77;
                    }
                }
            }
        } catch (RecognitionException re) {
            reportError(re);
            recover(input, re);
        } finally {
            // do for sure before leaving
            if (state.backtracking > 0) {
                memoize(input, 55, qualifiedNameList_StartIndex);
            }
        }
    }
}
public class Matchers {

    /**
     * Inverts the given matcher.
     *
     * @param p the matcher to invert
     * @return a matcher wrapping {@code p} in a negation
     */
    public static <T> Matcher<T> not(final Matcher<? super T> p) {
        return new Not<T>(p);
    }
}
public class SoundManager { /** * Invoked when an activator - event occurs . * @ param event an instance of Event */ @ Override public void eventFired ( EventModel event ) { } }
if ( event . containsDescriptor ( SoundIDs . StartRequest . descriptor ) ) { Identification identification = event . getListResourceContainer ( ) . provideResource ( "izou.common.resource.selector" ) . stream ( ) . map ( ResourceModel :: getResource ) . filter ( resource -> resource instanceof Identification ) . map ( resource -> ( Identification ) resource ) . findFirst ( ) . orElseGet ( event :: getSource ) ; AddOnModel addonModel = getMain ( ) . getAddOnInformationManager ( ) . getAddonModel ( identification ) ; if ( addonModel != null ) { requestPermanent ( addonModel , event . getSource ( ) , event . containsDescriptor ( SoundIDs . StartEvent . isUsingNonJava ) ) ; } } else if ( event . containsDescriptor ( SoundIDs . StartEvent . descriptor ) ) { checkAndUpdateIdentification ( event . getSource ( ) ) ; } else { Identification identification = event . getListResourceContainer ( ) . provideResource ( "izou.common.resource.selector" ) . stream ( ) . map ( ResourceModel :: getResource ) . filter ( resource -> resource instanceof Identification ) . map ( resource -> ( Identification ) resource ) . findFirst ( ) . orElseGet ( event :: getSource ) ; AddOnModel addonModel = getMain ( ) . getAddOnInformationManager ( ) . getAddonModel ( identification ) ; if ( addonModel != null ) { endPermanent ( addonModel ) ; } }
public class Market { /** * Delete an order from an order book . * < p > An update event is triggered . * < p > If the order identifier is unknown , do nothing . * @ param orderId the order identifier */ public void delete ( long orderId ) { } }
Order order = orders . get ( orderId ) ; if ( order == null ) { return ; } OrderBook book = order . getOrderBook ( ) ; boolean bbo = book . update ( order . getSide ( ) , order . getPrice ( ) , - order . getRemainingQuantity ( ) ) ; orders . remove ( orderId ) ; listener . update ( book , bbo ) ;
public class Type { /** * Accessed by the TypeChecker , to override the default . */ public Type setArrayElementType ( Type elementType ) throws IntrospectionException { } }
Type type = new Type ( mGenericType , mNaturalClass ) ; type . checkForArrayLookup ( ) ; type . mArrayElementType = elementType ; return type ;
public class Schema { /** * remove in favour of PropertyColumn */ Map < String , Map < String , PropertyType > > getAllTables ( ) { } }
Map < String , Map < String , PropertyType > > result = new HashMap < > ( ) ; for ( Map . Entry < String , VertexLabel > vertexLabelEntry : this . vertexLabels . entrySet ( ) ) { String vertexQualifiedName = this . name + "." + VERTEX_PREFIX + vertexLabelEntry . getValue ( ) . getLabel ( ) ; result . put ( vertexQualifiedName , vertexLabelEntry . getValue ( ) . getPropertyTypeMap ( ) ) ; } if ( this . topology . isSqlWriteLockHeldByCurrentThread ( ) ) { for ( Map . Entry < String , VertexLabel > vertexLabelEntry : this . uncommittedVertexLabels . entrySet ( ) ) { String vertexQualifiedName = vertexLabelEntry . getKey ( ) ; VertexLabel vertexLabel = vertexLabelEntry . getValue ( ) ; result . put ( vertexQualifiedName , vertexLabel . getPropertyTypeMap ( ) ) ; } } for ( EdgeLabel edgeLabel : this . getEdgeLabels ( ) . values ( ) ) { String edgeQualifiedName = this . name + "." + EDGE_PREFIX + edgeLabel . getLabel ( ) ; result . put ( edgeQualifiedName , edgeLabel . getPropertyTypeMap ( ) ) ; } return result ;
public class ListResourceComplianceSummariesResult { /** * A summary count for specified or targeted managed instances . Summary count includes information about compliant * and non - compliant State Manager associations , patch status , or custom items according to the filter criteria that * you specify . * < b > NOTE : < / b > This method appends the values to the existing list ( if any ) . Use * { @ link # setResourceComplianceSummaryItems ( java . util . Collection ) } or * { @ link # withResourceComplianceSummaryItems ( java . util . Collection ) } if you want to override the existing values . * @ param resourceComplianceSummaryItems * A summary count for specified or targeted managed instances . Summary count includes information about * compliant and non - compliant State Manager associations , patch status , or custom items according to the * filter criteria that you specify . * @ return Returns a reference to this object so that method calls can be chained together . */ public ListResourceComplianceSummariesResult withResourceComplianceSummaryItems ( ResourceComplianceSummaryItem ... resourceComplianceSummaryItems ) { } }
if ( this . resourceComplianceSummaryItems == null ) { setResourceComplianceSummaryItems ( new com . amazonaws . internal . SdkInternalList < ResourceComplianceSummaryItem > ( resourceComplianceSummaryItems . length ) ) ; } for ( ResourceComplianceSummaryItem ele : resourceComplianceSummaryItems ) { this . resourceComplianceSummaryItems . add ( ele ) ; } return this ;
public class Utils {

    /**
     * Appends the given string encoding special HTML characters.
     *
     * @param out
     *            The StringBuilder to write to.
     * @param in
     *            Input String.
     * @param start
     *            Input String starting position.
     * @param end
     *            Input String end position.
     */
    public final static void appendCode(final StringBuilder out, final String in, final int start, final int end) {
        for (int i = start; i < end; i++) {
            final char c = in.charAt(i);
            // Escape the three HTML metacharacters; everything else passes through.
            if (c == '&') {
                out.append("&amp;");
            } else if (c == '<') {
                out.append("&lt;");
            } else if (c == '>') {
                out.append("&gt;");
            } else {
                out.append(c);
            }
        }
    }
}
public class TieredBlockStore {

    /**
     * Creates a temp block meta only if allocator finds available space. This method will not trigger
     * any eviction.
     *
     * @param sessionId session id
     * @param blockId block id
     * @param location location to create the block
     * @param initialBlockSize initial block size in bytes
     * @param newBlock true if this temp block is created for a new block
     * @return a temp block created if successful, or null if allocation failed (instead of throwing
     *         {@link WorkerOutOfSpaceException} because allocation failure could be an expected case)
     * @throws BlockAlreadyExistsException if there is already a block with the same block id
     */
    private TempBlockMeta createBlockMetaInternal(long sessionId, long blockId, BlockStoreLocation location,
            long initialBlockSize, boolean newBlock) throws BlockAlreadyExistsException {
        // NOTE: a temp block is supposed to be visible for its own writer, unnecessary to acquire
        // block lock here since no sharing
        try (LockResource r = new LockResource(mMetadataWriteLock)) {
            if (newBlock) {
                checkTempBlockIdAvailable(blockId);
            }
            StorageDirView dirView =
                    mAllocator.allocateBlockWithView(sessionId, initialBlockSize, location, getUpdatedView());
            if (dirView == null) {
                // Allocator fails to find a proper place for this new block.
                return null;
            }
            // TODO(carson): Add tempBlock to corresponding storageDir and remove the use of
            // StorageDirView.createTempBlockMeta.
            TempBlockMeta tempBlock = dirView.createTempBlockMeta(sessionId, blockId, initialBlockSize);
            try {
                // Add allocated temp block to metadata manager. This should never fail if allocator
                // correctly assigns a StorageDir.
                mMetaManager.addTempBlockMeta(tempBlock);
            } catch (WorkerOutOfSpaceException | BlockAlreadyExistsException e) {
                // If we reach here, allocator is not working properly
                LOG.error("Unexpected failure: {} bytes allocated at {} by allocator, "
                        + "but addTempBlockMeta failed", initialBlockSize, location);
                throw Throwables.propagate(e);
            }
            return tempBlock;
        }
    }
}
public class Files { /** * Là ¶ scht alle Dateien und Verzeichnisse in dem Ã1 ⁄ 4bergebenen Verzeichnis * @ param directory * Zu sà ¤ uberndes Verzeichnis */ public static boolean clearDirectory ( File directory ) { } }
Assert . isTrue ( directory . isDirectory ( ) , "Parameter ist kein Verzeichnis" ) ; File [ ] files = directory . listFiles ( ) ; boolean allDeleted = true ; for ( File file : files ) { if ( file . isDirectory ( ) ) { clearDirectory ( file ) ; } allDeleted &= file . delete ( ) ; } return allDeleted ;
public class IPConfig {

    /**
     * Entry point for running IPConfig.
     * <p>
     * An IP host or port identifier has to be supplied to specify the endpoint for the
     * KNX network access. Supply {@code -help} (or {@code -h}) to show the usage message.
     * Command line options are treated case sensitive. Available options:
     * <ul>
     * <li>{@code -help -h} show help message</li>
     * <li>{@code -version} show tool/library version and exit</li>
     * <li>{@code -local -l} local device management</li>
     * <li>{@code -remote -r} <i>KNX addr</i> remote property service</li>
     * <li>{@code -localhost} <i>id</i> local IP/host name</li>
     * <li>{@code -localport} <i>number</i> local UDP port (default system assigned)</li>
     * <li>{@code -port -p} <i>number</i> UDP port on host (default 3671)</li>
     * <li>{@code -nat -n} enable Network Address Translation</li>
     * <li>{@code -serial -s} use FT1.2 serial communication</li>
     * </ul>
     * For remote property service these options are available:
     * <ul>
     * <li>{@code -routing} use KNXnet/IP routing</li>
     * <li>{@code -medium -m} <i>id</i> KNX medium [tp0|tp1|p110|p132|rf] (defaults to tp1)</li>
     * <li>{@code -connect -c} connection oriented mode</li>
     * <li>{@code -authorize -a} <i>key</i> authorize key to access KNX device</li>
     * </ul>
     * In any case, the tool reads out the IP configuration of the connected endpoint and
     * writes it to standard output. Supply one or more of the following commands to change
     * the IP configuration (accepted without regard to capitalization):
     * {@code IP <address>} (fixed IP address), {@code subnet <address>},
     * {@code gateway <address>}, {@code multicast <address>} (routing multicast),
     * {@code manual}, {@code BootP}, {@code DHCP}, {@code AutoIP}
     * (IP-assignment mode for the current IP address).
     *
     * @param args command line options to run the tool
     */
    public static void main(String[] args) {
        try {
            // The constructor performs the whole tool run.
            new IPConfig(args);
        } catch (final Throwable t) {
            // Tool-style top-level handler: print only the message, no stack trace.
            if (t.getMessage() != null)
                System.out.println(t.getMessage());
        }
    }
}
public class DPathUtils { private static Object createIntermediate ( Object target , StringBuffer pathSofar , String index , String nextIndex ) { } }
if ( target instanceof JsonNode ) { return createIntermediate ( ( JsonNode ) target , pathSofar , index , nextIndex ) ; } Object value = PATTERN_INDEX . matcher ( nextIndex ) . matches ( ) ? new ArrayList < Object > ( ) : new HashMap < String , Object > ( ) ; return createIntermediate ( target , pathSofar , index , value ) ;
public class ByteArrayUtil { /** * Put the source < i > double < / i > into the destination byte array starting at the given offset * in big endian order . * There is no bounds checking . * @ param array destination byte array * @ param offset destination offset * @ param value source < i > double < / i > */ public static void putDoubleBE ( final byte [ ] array , final int offset , final double value ) { } }
// Reinterpret the double's IEEE-754 bit pattern as a long and delegate
// to the big-endian long writer; no bounds checking, per the contract.
putLongBE(array, offset, Double.doubleToRawLongBits(value));
public class HBCIUtils { /** * Gibt zu einer gegebenen Bankleitzahl zurück , welche HBCI - Version für HBCI - PIN / TAN * bzw . RDH zu verwenden ist . Siehe auch { @ link # getHBCIVersionForBLZ ( String ) } * @ param blz * @ return HBCI - Version * @ deprecated Bitte { @ link HBCIUtils # getBankInfo ( String ) } verwenden . */ public static String getPinTanVersionForBLZ ( String blz ) { } }
// Look up the bank; an unknown BLZ or a missing PIN/TAN version both
// yield the empty string.
final BankInfo info = getBankInfo(blz);
if (info == null || info.getPinTanVersion() == null) {
    return "";
}
return info.getPinTanVersion().getId();
public class TimeSeriesCountBytesSizeAccumulator { /** * Gets number . * @ param timestamp the timestamp * @ param valueFunction the value function * @ return the number */ public double getNumber ( long timestamp , Function < CountBytesSizeAccumulator , Long > valueFunction ) { } }
// Look up the accumulator stored under the given timestamp, extract the
// requested Long metric, widen it to double, and default to 0 when no
// accumulator exists for that timestamp.
return JMOptional.getOptional(this, timestamp).map(valueFunction).map((Long::doubleValue)).orElse(0d);
public class SimpleBitSet { public static void main ( String [ ] args ) throws Exception { } }
// Ad-hoc smoke test: serialize a bit set, mutate the in-memory copy,
// then deserialize to show the persisted snapshot is unaffected.
// Use a real temp file instead of the hard-coded "/tmp/test.2" path,
// which is not portable (e.g. Windows) and races with concurrent runs.
File f = File.createTempFile("simplebitset", ".bin");
try {
    SimpleBitSet bs = new SimpleBitSet();
    bs.set(1198);
    bs.set(23);
    serializeToFile(f, bs);
    bs.set(666); // set after the snapshot: visible only in the first printout
    System.out.println(bs.toString());
    bs = deserializeFromFile(f);
    System.out.println(bs.toString());
} finally {
    // Always clean up, even if (de)serialization throws.
    f.delete();
}
public class ServletUtil { /** * Checks if a resource with the possibly - relative path exists . * @ deprecated Use regular methods directly * @ see ServletContext # getResource ( java . lang . String ) * @ see ServletContextCache # getResource ( java . lang . String ) * @ see ServletContextCache # getResource ( javax . servlet . ServletContext , java . lang . String ) */ @ Deprecated public static boolean resourceExists ( ServletContext servletContext , String path ) throws MalformedURLException { } }
// A resource exists iff the (possibly relative) path resolves to a URL.
return getResource(servletContext, path) != null;
public class Utility { /** * end parseFixedLengthFloatString */ public float parseSingleFloatString ( String numberString , boolean constrained0to1 , boolean zeroOrGreater ) { } }
float [ ] value = parseFixedLengthFloatString ( numberString , 1 , constrained0to1 , zeroOrGreater ) ; return value [ 0 ] ;
public class AWSSecurityHubClient { /** * Updates the AWS Security Hub insight specified by the insight ARN . * @ param updateInsightRequest * @ return Result of the UpdateInsight operation returned by the service . * @ throws InternalException * Internal server error . * @ throws InvalidInputException * The request was rejected because an invalid or out - of - range value was supplied for an input parameter . * @ throws InvalidAccessException * AWS Security Hub is not enabled for the account used to make this request . * @ throws LimitExceededException * The request was rejected because it attempted to create resources beyond the current AWS account limits . * The error code describes the limit exceeded . * @ throws ResourceNotFoundException * The request was rejected because the specified resource cannot be found . * @ sample AWSSecurityHub . UpdateInsight * @ see < a href = " http : / / docs . aws . amazon . com / goto / WebAPI / securityhub - 2018-10-26 / UpdateInsight " target = " _ top " > AWS API * Documentation < / a > */ @ Override public UpdateInsightResult updateInsight ( UpdateInsightRequest request ) { } }
// Standard generated AWS SDK dispatch: run pre-execution handlers on
// the request, then invoke the operation implementation.
request = beforeClientExecution(request);
return executeUpdateInsight(request);
public class JsonDeserializer { /** * fromJSON . * @ param response * a { @ link java . lang . String } object . * @ param target * a { @ link com . cloudcontrolled . api . response . Response } object . * @ param < T > * a T object . * @ return a { @ link com . cloudcontrolled . api . response . Response } object . */ @ SuppressWarnings ( "unchecked" ) public < T > Response < T > fromJSON ( String response , Response < T > target ) { } }
try { response = StandardizationUtil . getJSONStandardizer ( target ) . normalize ( response ) ; } catch ( Exception e ) { throw new SerializationException ( e ) ; } try { Response < T > fromJson = gson . fromJson ( response , target . getClass ( ) ) ; if ( fromJson == null ) { fromJson = target ; } return fromJson ; } catch ( JsonSyntaxException jse ) { throw new SerializationException ( jse ) ; }
public class SQLRebuilder { /** * Validate the provided options and perform any necessary startup tasks . */ @ Override public void start ( Map < String , String > options ) throws Exception { } }
// This must be done before starting " RebuildServer " // rather than after , so any application caches // ( in particular the hash map held by PIDGenerator ) // don ' t get out of sync with the database . blankExistingTables ( ) ; try { m_server = Rebuild . getServer ( ) ; // now get the connectionpool ConnectionPoolManager cpm = ( ConnectionPoolManager ) m_server . getModule ( "org.fcrepo.server.storage.ConnectionPoolManager" ) ; if ( cpm == null ) { throw new ModuleInitializationException ( "ConnectionPoolManager not loaded." , "ConnectionPoolManager" ) ; } m_connectionPool = cpm . getPool ( ) ; ensureFedoraTables ( ) ; // set m _ now , which is both when we are starting this job and the flag // that it was started m_now = System . currentTimeMillis ( ) ; startStatus ( m_now ) ; m_context = ReadOnlyContext . getContext ( "utility" , "fedoraAdmin" , "" , /* null , */ ReadOnlyContext . DO_OP ) ; ILowlevelStorage llstore = ( ILowlevelStorage ) m_server . getModule ( "org.fcrepo.server.storage.lowlevel.ILowlevelStorage" ) ; try { llstore . rebuildObject ( ) ; llstore . rebuildDatastream ( ) ; } catch ( LowlevelStorageException e ) { // TODO Auto - generated catch block e . printStackTrace ( ) ; } } catch ( InitializationException ie ) { logger . error ( "Error initializing" , ie ) ; throw ie ; }
public class Function { /** * Override this for GQuery methods which take a callback , but do not expect a * return value , apply to a single widget . * NOTE : If your query has non - widget elements you might need to override * ' public void f ( ) ' or ' public void f ( Element e ) ' to handle these elements and * avoid a runtime exception . */ public void f ( Widget w ) { } }
// Operate on the widget's underlying DOM element.
setElement(w.getElement());
if (loop) {
    // Recursion guard: the no-arg f() may dispatch back into this
    // method, so clear the flag before delegating to avoid looping.
    loop = false;
    f();
} else {
    f(w.getElement().<com.google.gwt.dom.client.Element> cast());
}
public class MainFrame { /** * GEN - LAST : event _ menuGoToFileActionPerformed */ private void menuUndoActionPerformed ( java . awt . event . ActionEvent evt ) { } }
// GEN - FIRST : event _ menuUndoActionPerformed final TabTitle title = this . getFocusedTab ( ) ; if ( title != null ) { this . menuUndo . setEnabled ( title . getProvider ( ) . undo ( ) ) ; this . menuRedo . setEnabled ( title . getProvider ( ) . isRedo ( ) ) ; }
public class HttpOutboundServiceContextImpl { /** * Retrieve all remaining buffers of the response message ' s body . This will * give the buffers without any modifications , avoiding decompression or * chunked encoding removal . * A null buffer array will be returned if there is no more data to get . * The caller is responsible for releasing these buffers when complete as the HTTP Channel does not keep track of them . * @ return WsByteBuffer [ ] * @ throws IOException * - - if a socket exceptions happens * @ throws IllegalHttpBodyException * - - if the body was malformed */ @ Override public WsByteBuffer [ ] getRawResponseBodyBuffers ( ) throws IOException , IllegalHttpBodyException { } }
if (TraceComponent.isAnyTracingEnabled() && tc.isEntryEnabled()) {
    Tr.entry(tc, "getRawResponseBodyBuffers(sync)");
}
// Flag the body as "raw" so no decompression or chunked-encoding
// removal is applied, then reuse the regular body-buffer retrieval.
setRawBody(true);
final WsByteBuffer[] buffers = getResponseBodyBuffers();
if (TraceComponent.isAnyTracingEnabled() && tc.isEntryEnabled()) {
    Tr.exit(tc, "getRawResponseBodyBuffers(sync): " + buffers);
}
return buffers;
public class PredefinedMetricTransformer { /** * Returns a non - null list of metric datum for the metrics collected for the * given request / response . * @ param metricType the request metric type */ public List < MetricDatum > toMetricData ( MetricType metricType , Request < ? > request , Response < ? > response ) { } }
// Dispatch on the predefined (cross-client) metric type first; the
// case groupings below deliberately fall through to share routines.
if (metricType instanceof Field) {
    // Predefined metrics across all AWS http clients
    Field predefined = (Field) metricType;
    switch (predefined) {
        case HttpClientRetryCount:
        case HttpClientPoolAvailableCount:
        case HttpClientPoolLeasedCount:
        case HttpClientPoolPendingCount:
            return metricOfCount(predefined, request, response);
        case RequestCount: // intentionally fall through to reuse the same routine as RetryCount
        case RetryCount:
            return metricOfRequestOrRetryCount(predefined, request, response);
        case ThrottledRetryCount: // drop through
        case RetryCapacityConsumed:
            return counterMetricOf(predefined, request, response, EXCLUDE_REQUEST_TYPE);
        case ResponseProcessingTime: // drop through
        case RequestSigningTime: // drop through
            return latencyMetricOf(predefined, request, response, EXCLUDE_REQUEST_TYPE);
        case ClientExecuteTime:
            return latencyOfClientExecuteTime(request, response);
        case HttpClientSendRequestTime:
        case HttpClientReceiveResponseTime:
        case HttpRequestTime:
        case HttpSocketReadTime:
            return latencyMetricOf(predefined, request, response, INCLUDE_REQUEST_TYPE);
        case Exception:
        case ThrottleException:
            return counterMetricOf(predefined, request, response, INCLUDE_REQUEST_TYPE);
        default:
            break;
    }
}
// Predefined metrics for specific service clients: delegate to the
// first factory whose name prefixes the metric type's name.
for (AWSMetricTransformerFactory aws : AWSMetricTransformerFactory.values()) {
    if (metricType.name().startsWith(aws.name())) {
        List<MetricDatum> metricData = aws.getRequestMetricTransformer().toMetricData(metricType, request, response);
        if (metricData != null)
            return metricData;
        break;
    }
}
// No transformer matched; report at debug level and return nothing.
if (log.isDebugEnabled()) {
    AmazonWebServiceRequest origReq = request == null ? null : request.getOriginalRequest();
    String reqClassName = origReq == null ? null : origReq.getClass().getName();
    log.debug("No request metric transformer can be found for metric type " + metricType.name() + " for " + reqClassName);
}
return Collections.emptyList();
public class VersionString { /** * Returns the string as a map using the key " major " for the major part , the key " minor " for the minor part and the key " patch " for the patch part . * @ return map representation of the string */ public Map < String , Integer > toMap ( ) { } }
// Expose the three version components under their conventional keys.
final Map<String, Integer> parts = new HashMap<>();
parts.put("patch", this.patch);
parts.put("minor", this.minor);
parts.put("major", this.major);
return parts;
public class GenericFudge { /** * Fudge the return from getClass to include the generic . */ static public < T > Class < ? extends T > getClass ( T obj ) { } }
// Safe unchecked cast: Object.getClass() invoked on a T is specified to
// return Class<? extends |T|> (the erasure of T), so narrowing the
// wildcard here cannot fail at runtime.
return (Class<? extends T>) obj.getClass();
public class UUID { /** * < p > Returns the node identifier found in this UUID . The specification was written such that this value holds the IEEE 802 MAC * address . The specification permits this value to be calculated from other sources other than the MAC . < / p > * @ return the node identifier found in this UUID . * @ throws UnsupportedOperationException thrown if this is not a IETF variant or not a time - based UUID . */ public long node ( ) throws UnsupportedOperationException { } }
// if variant is not mealling leach salz throw unsupported operation exception if ( variant ( ) != VARIANT_IETF_DRAFT || version ( ) != VERSION_ONE ) { throw new UnsupportedOperationException ( WRONG_VAR_VER_MSG ) ; } if ( node == null ) { byte [ ] b = new byte [ 8 ] ; System . arraycopy ( rawBytes , 10 , b , 2 , 6 ) ; node = new Long ( ( Bytes . toLong ( b ) & 0xFFFFFFFFFFFFL ) ) ; } return node . longValue ( ) ;
public class PackageIndexWriter { /** * Adds the overview summary comment for this documentation . Add one line * summary at the top of the page and generate a link to the description , * which is added at the end of this page . * @ param body the documentation tree to which the overview header will be added */ @ Override protected void addOverviewHeader ( Content body ) { } }
// Emit the configured page title first.
addConfigurationTitle(body);
// Only render the overview block when the doc comment has body text.
if (!utils.getFullBody(configuration.overviewElement).isEmpty()) {
    HtmlTree div = new HtmlTree(HtmlTag.DIV);
    div.addStyle(HtmlStyle.contentContainer);
    addOverviewComment(div);
    // HTML5 output nests the overview inside <main>; otherwise it is
    // appended directly to the body tree.
    if (configuration.allowTag(HtmlTag.MAIN)) {
        htmlTree.addContent(div);
    } else {
        body.addContent(div);
    }
}
public class ClassInfo { /** * Get the table name . */ public String getTableNames ( boolean bAddQuotes ) { } }
// Fall back to the well-known class-info file name when no explicit
// table name is configured; otherwise defer to the superclass.
if (m_tableName == null) {
    return Record.formatTableNames(CLASS_INFO_FILE, bAddQuotes);
}
return super.getTableNames(bAddQuotes);
public class LogServlet { /** * Get the log files from the directory * @ param index * @ return A { @ link File } that represent the file to read from current directory . */ private File retrieveFileFromLogsFolder ( String index ) { } }
// listFiles() returns null when the logs directory does not exist or an
// I/O error occurs; treat that the same as "no matching file" instead
// of letting the for-loop throw a NullPointerException.
File[] logFiles = getLogsDirectory().listFiles(new LogFilesFilter());
if (logFiles == null) {
    return null;
}
// Match on the file's base name (text before the first '.') ending
// with the requested index; first match wins.
for (File eachLogFile : logFiles) {
    String fileName = eachLogFile.getName().split("\\Q.\\E")[0];
    if (fileName.endsWith(index)) {
        return eachLogFile;
    }
}
return null;
public class MappingImpl { /** * clones a mapping and make it readOnly * @ param config * @ return cloned mapping * @ throws IOException */ public MappingImpl cloneReadOnly ( ConfigImpl config ) { } }
// Copy every setting verbatim, forcing only the read-only flag
// (the literal true argument) in the clone.
return new MappingImpl(config, virtual, strPhysical, strArchive, inspect, physicalFirst, hidden, true, topLevel,
        appMapping, ignoreVirtual, appListener, listenerMode, listenerType);
public class JNvgraph { /** * nvGRAPH PageRank * Find PageRank for each vertex of a graph with a given transition probabilities , a bookmark vector of dangling vertices , and the damping factor . */ public static int nvgraphPagerank ( nvgraphHandle handle , nvgraphGraphDescr descrG , long weight_index , Pointer alpha , long bookmark_index , int has_guess , long pagerank_index , float tolerance , int max_iter ) { } }
// Delegate to the native nvGRAPH PageRank implementation; checkResult
// converts the native status code (and may throw on error).
return checkResult(nvgraphPagerankNative(handle, descrG, weight_index, alpha, bookmark_index, has_guess,
        pagerank_index, tolerance, max_iter));
public class InternalSimpleExpressionsParser { /** * InternalSimpleExpressions . g : 554:1 : entryRuleParenthesizedExpression returns [ EObject current = null ] : iv _ ruleParenthesizedExpression = ruleParenthesizedExpression EOF ; */ public final EObject entryRuleParenthesizedExpression ( ) throws RecognitionException { } }
// Generated ANTLR entry rule: parse one ParenthesizedExpression and
// require EOF afterwards.
EObject current = null;
EObject iv_ruleParenthesizedExpression = null;
try {
    // InternalSimpleExpressions.g:555:2: (iv_ruleParenthesizedExpression= ruleParenthesizedExpression EOF )
    // InternalSimpleExpressions.g:556:2: iv_ruleParenthesizedExpression= ruleParenthesizedExpression EOF
    {
        newCompositeNode(grammarAccess.getParenthesizedExpressionRule());
        pushFollow(FOLLOW_1);
        iv_ruleParenthesizedExpression = ruleParenthesizedExpression();
        state._fsp--;
        current = iv_ruleParenthesizedExpression;
        // the entry rule must consume the whole input
        match(input, EOF, FOLLOW_2);
    }
} catch (RecognitionException re) {
    // standard ANTLR error recovery: resync and keep the partial result
    recover(input, re);
    appendSkippedTokens();
} finally {
}
return current;
public class MethodUtils { /** * Invoke a static method that has no parameters . * @ param objectClass invoke static method on this class * @ param methodName get method with this name * @ return the value returned by the invoked method * @ throws NoSuchMethodException if there is no such accessible method * @ throws InvocationTargetException wraps an exception thrown by the method invoked * @ throws IllegalAccessException if the requested method is not accessible via reflection */ public static Object invokeExactStaticMethod ( Class < ? > objectClass , String methodName ) throws NoSuchMethodException , IllegalAccessException , InvocationTargetException { } }
// Convenience overload: no arguments and no parameter types.
return invokeExactStaticMethod(objectClass, methodName, EMPTY_OBJECT_ARRAY, EMPTY_CLASS_PARAMETERS);
public class KeyVaultClientBaseImpl { /** * Creates a new key , stores it , then returns key parameters and attributes to the client . * The create key operation can be used to create any key type in Azure Key Vault . If the named key already exists , Azure Key Vault creates a new version of the key . It requires the keys / create permission . * @ param vaultBaseUrl The vault name , for example https : / / myvault . vault . azure . net . * @ param keyName The name for the new key . The system will generate the version name for the new key . * @ param kty The type of key to create . For valid values , see JsonWebKeyType . Possible values include : ' EC ' , ' EC - HSM ' , ' RSA ' , ' RSA - HSM ' , ' oct ' * @ throws IllegalArgumentException thrown if parameters fail the validation * @ return the observable to the KeyBundle object */ public Observable < KeyBundle > createKeyAsync ( String vaultBaseUrl , String keyName , JsonWebKeyType kty ) { } }
// Unwrap the ServiceResponse envelope so callers observe only the
// KeyBundle payload.
return createKeyWithServiceResponseAsync(vaultBaseUrl, keyName, kty).map(new Func1<ServiceResponse<KeyBundle>, KeyBundle>() {
    @Override
    public KeyBundle call(ServiceResponse<KeyBundle> response) {
        return response.body();
    }
});
public class TypeConvertingCompiler { /** * On Java - level the any - type is represented as java . lang . Object as there is no subtype of everything ( i . e . type for null ) . * So , when the values are used we need to manually cast them to whatever is expected . * This method tells us whether such a cast is needed . */ private boolean isToBeCastedAnyType ( LightweightTypeReference actualType , XExpression obj , ITreeAppendable appendable ) { } }
// Only values statically typed as the any-type can need a manual cast.
if (!(actualType instanceof AnyTypeReference)) {
    return false;
}
// Named references are used directly, so a cast is required.
if (getReferenceName(obj, appendable) != null) {
    return true;
}
// For block expressions the answer is determined by the expression
// whose value the block yields.
if (obj instanceof XBlockExpression) {
    EList<XExpression> expressions = ((XBlockExpression) obj).getExpressions();
    if (expressions.isEmpty()) {
        return false;
    }
    if (expressions.size() > 1) {
        return true;
    }
    // exactly one expression: recurse into it
    return isToBeCastedAnyType(actualType, expressions.get(0), appendable);
}
return false;
public class XEventClasses { /** * Creates a new set of event classes , factory method . * @ param classifier * The classifier to be used for event comparison . * @ param log * The log , on which event classes should be imposed . * @ return A set of event classes , as an instance of this class . */ public static synchronized XEventClasses deriveEventClasses ( XEventClassifier classifier , XLog log ) { } }
// Build the class set, populate it from the log, then normalize the
// class indices before handing it out.
final XEventClasses eventClasses = new XEventClasses(classifier);
eventClasses.register(log);
eventClasses.harmonizeIndices();
return eventClasses;
public class JdbcParameterFactory { /** * { @ link java . sql . Connection # createArrayOf ( String , Object [ ] ) } のラッパー * @ param conn コネクション * @ param typeName 配列の要素がマッピングされる型のSQL名 。 typeNameはデータベース固有の名前で 、 組込み型 、 ユーザー定義型 、 またはこのデータベースでサポートされる標準SQL型の名前のこと 。 これは 、 Array . getBaseTypeNameで返される値 * @ param elements 返されるオブジェクトを生成する要素 * @ return 指定されたSQL型に要素がマッピングされるArrayオブジェクト * @ see java . sql . Connection # createArrayOf ( String , Object [ ] ) */ public static Array createArrayOf ( final Connection conn , final String typeName , final Object [ ] elements ) { } }
// Wrap the checked SQLException in the library's runtime exception so
// callers don't have to declare it.
try {
    return conn.createArrayOf(typeName, elements);
} catch (SQLException e) {
    throw new UroborosqlRuntimeException(e);
}
public class AlertIncidentsDeserializer { /** * Gson invokes this call - back method during deserialization when it encounters a field of the specified type . * @ param element The Json data being deserialized * @ param type The type of the Object to deserialize to * @ param context The JSON deserialization context * @ return The alert incidents */ @ Override public Collection < AlertIncident > deserialize ( JsonElement element , Type type , JsonDeserializationContext context ) throws JsonParseException { } }
// The payload nests the actual incidents under an "incidents" array.
final JsonArray incidents = element.getAsJsonObject().getAsJsonArray("incidents");
final List<AlertIncident> result = new ArrayList<AlertIncident>();
if (incidents != null && incidents.isJsonArray()) {
    for (final JsonElement incident : incidents) {
        result.add(gson.fromJson(incident, AlertIncident.class));
    }
}
// Missing or non-array "incidents" yields an empty collection.
return result;
public class SessionAffinityManagerImpl { /** * analyzeSSLRequest - taken from WsSessionAffinityManager in WAS7 */ public SessionAffinityContext analyzeSSLRequest ( ServletRequest request , String sslSessionId ) { } }
if (com.ibm.ejs.ras.TraceComponent.isAnyTracingEnabled() && LoggingUtil.SESSION_LOGGER_CORE.isLoggable(Level.FINE)) {
    LoggingUtil.SESSION_LOGGER_CORE.entering(methodClassName, methodNames[ANALYZE_SSL_REQUEST]);
    LoggingUtil.SESSION_LOGGER_CORE.logp(Level.FINE, methodClassName, methodNames[ANALYZE_SSL_REQUEST], "SSL Id from Request = " + sslSessionId);
}
String sessionId = sslSessionId;
boolean reqFromCookie = false;
boolean reqFromURL = false;
boolean reqFromSSL = false;
SessionAffinityContext sessionAffinityContext = null;
List allSessionIds = null;
IExtendedRequest sessReq = (IExtendedRequest) request;
if (sessionId != null) {
    reqFromSSL = true;
    String tempDummyId;
    String tempCacheId;
    // in-memory sessions use cache id "0000"; persistent ones "0001"
    if (_smc.isUsingMemory()) {
        tempCacheId = "0000";
    } else {
        tempCacheId = "0001";
    }
    String tempCloneInfo = "";
    // look for cacheid and clone info in the cookie or Rewritten URL
    // cmd 213330 start
    String extendedId = null;
    byte[] byteExtendedId = sessReq.getCookieValueAsBytes(SessionManagerConfig.dcookieName);
    if (byteExtendedId != null) {
        extendedId = new String(byteExtendedId);
    }
    // cmd 213330 end
    if (extendedId == null) {
        // fall back to URL rewriting when no affinity cookie is present
        extendedId = getRequestedSessionIdFromURL(request);
        if (com.ibm.ejs.ras.TraceComponent.isAnyTracingEnabled() && LoggingUtil.SESSION_LOGGER_CORE.isLoggable(Level.FINE)) {
            LoggingUtil.SESSION_LOGGER_CORE.logp(Level.FINE, methodClassName, methodNames[ANALYZE_SSL_REQUEST], "getRequestedSessionId - encoded URL contains: " + extendedId);
        }
    }
    if (extendedId != null) {
        tempCacheId = extendedId.substring(0, 4); // cacheid is always first
        int index = extendedId.indexOf(SessionManagerConfig.getCloneSeparator());
        if (index != -1) {
            tempDummyId = extendedId.substring(4, index);
            tempCloneInfo = extendedId.substring(index);
        }
    }
    // rebuild the id as <cache id> + SSL-session-id marker + clone info
    sessionId = tempCacheId + SessionAffinityContext.SSLSessionId + tempCloneInfo;
    allSessionIds = new ArrayList(1);
    allSessionIds.add(0, sessionId);
    sessionAffinityContext = new SessionAffinityContext(allSessionIds, reqFromCookie, reqFromURL, reqFromSSL);
    setNextId(sessionAffinityContext);
} // Use SSL Sessionid NOT dummy id
if (com.ibm.ejs.ras.TraceComponent.isAnyTracingEnabled() && LoggingUtil.SESSION_LOGGER_CORE.isLoggable(Level.FINE)) {
    LoggingUtil.SESSION_LOGGER_CORE.logp(Level.FINE, methodClassName, methodNames[ANALYZE_SSL_REQUEST], "getRequestedSessionId - massaged long SSL id is now: " + sessionId);
}
// returns null if no SSL context
return sessionAffinityContext;
public class CodeGenerator { /** * Create language alias mapping . */ private static void createLanguageAliases ( TypeSpec . Builder type , Map < String , String > languageAliases ) { } }
// Emit a private static registerLanguageAliases() method containing one
// addLanguageAlias(...) statement per alias pair.
final MethodSpec.Builder builder = MethodSpec.methodBuilder("registerLanguageAliases").addModifiers(PRIVATE, STATIC);
for (final Map.Entry<String, String> alias : languageAliases.entrySet()) {
    builder.addStatement("addLanguageAlias($S, $S)", alias.getKey(), alias.getValue());
}
type.addMethod(builder.build());
public class GeometryCollection { /** * { @ inheritDoc } */ @ Override public JSONObject toJSON ( ) throws JSONException { } }
// Start from the base JSON representation and attach the serialized
// child geometries under the geometries key.
final JSONObject json = super.toJSON();
final JSONArray geometries = new JSONArray();
for (final Geometry member : this.mGeometries) {
    geometries.put(member.toJSON());
}
json.put(JSON_GEOMETRIES, geometries);
return json;
public class BsonReader { /** * Read the binary BSON representation from supplied input stream and construct the { @ link Document } representation . * @ param input the input stream ; may not be null * @ return the in - memory { @ link Document } representation * @ throws IOException if there was a problem reading from the stream */ public Array readArray ( DataInput input ) throws IOException { } }
// Create an object so that this reader is thread safe . . . DocumentValueFactory valueFactory = VALUE_FACTORY ; Reader reader = new Reader ( new BsonDataInput ( input ) , valueFactory ) ; reader . startArray ( ) ; return ( Array ) reader . endDocument ( ) ;
public class FileSystemView { /** * Unlocks source and copy files after copying content . Also closes the source file so its content * can be deleted if it was deleted . */ private void unlockSourceAndCopy ( File sourceFile , File copyFile ) { } }
// Release the source's read lock first, then the copy's write lock,
// matching the order in which the copy operation acquired them.
final ReadWriteLock sourceContentLock = sourceFile.contentLock();
if (sourceContentLock != null) {
    sourceContentLock.readLock().unlock();
}
final ReadWriteLock copyContentLock = copyFile.contentLock();
if (copyContentLock != null) {
    copyContentLock.writeLock().unlock();
}
// Closing the source lets its content be reclaimed if the file was
// deleted while the copy was in progress.
sourceFile.closed();
public class UtilAerospike { /** * Converts from AerospikeRecord to cell class with deep ' s anotations . * @ param aerospikeRecord * @ param key * @ param aerospikeConfig * @ return * @ throws IllegalAccessException * @ throws InstantiationException * @ throws InvocationTargetException */ public static Cells getCellFromAerospikeRecord ( AerospikeKey key , AerospikeRecord aerospikeRecord , AerospikeDeepJobConfig aerospikeConfig ) throws IllegalAccessException , InstantiationException , InvocationTargetException { } }
// NOTE(review): namespace is produced by string concatenation, so it is
// never null here (a null getNamespace() yields "null.<set>"); the
// null checks below therefore always take the non-null branch --
// confirm whether that is the intended behavior.
String namespace = aerospikeConfig.getNamespace() + "." + aerospikeConfig.getSet();
String setName = aerospikeConfig.getSet();
String[] inputColumns = aerospikeConfig.getInputColumns();
// Optional single-bin equality filter: non-matching records produce an
// empty Cells result.
Tuple2<String, Object> equalsFilter = aerospikeConfig.getEqualsFilter();
String equalsFilterBin = equalsFilter != null ? equalsFilter._1() : null;
Object equalsFilterValue = equalsFilter != null ? equalsFilter._2() : null;
Cells cells = namespace != null ? new Cells(namespace) : new Cells();
Map<String, Object> map = aerospikeRecord.bins;
if (inputColumns != null) {
    // Explicit projection: every requested bin must exist in the record.
    if (equalsFilter == null || checkEqualityFilter(map, equalsFilterBin, equalsFilterValue)) {
        for (int i = 0; i < inputColumns.length; i++) {
            String binName = inputColumns[i];
            if (map.containsKey(binName)) {
                Cell cell = Cell.create(binName, map.get(binName));
                // the first projected bin is flagged as the key column
                if (i == 0) {
                    cell.setIsClusterKey(true);
                    cell.setIsKey(true);
                }
                cells.add(namespace, cell);
            } else {
                throw new InvocationTargetException(new Exception("There is no [" + binName + "] on aerospike [" + namespace + "." + setName + "] set"));
            }
        }
    }
} else {
    // No projection: emit every bin, first one flagged as the key.
    if (equalsFilter == null || checkEqualityFilter(map, equalsFilterBin, equalsFilterValue)) {
        int index = 0;
        for (Map.Entry<String, Object> bin : map.entrySet()) {
            Cell cell = Cell.create(bin.getKey(), bin.getValue());
            if (index == 0) {
                cell.setIsClusterKey(true);
                cell.setIsKey(true);
            }
            cells.add(namespace, cell);
            index++;
        }
    }
}
return cells;
public class Conditions { /** * Returns a { @ link ICondition condition } that is satisfied by a { @ link org . cornutum . tcases . PropertySet } that contains * between a specified minimum ( exclusive ) and maximum ( exclusive ) number of instances of a property . */ public static Between betweenExclusive ( String property , int minimum , int maximum ) { } }
// Exclusive bounds are composed from strict more-than / less-than
// conditions on the property's instance count.
return new Between(moreThan(property, minimum), lessThan(property, maximum));
public class ReportSlaveIDResponse { /** * setData - - initialize the slave ' s device dependent data when * initializing a response . * @ param data byte array */ public void setData ( byte [ ] data ) { } }
// There are always two bytes of payload in the message - - the // slave ID and the run status indicator . if ( data == null ) { m_length = 2 ; m_data = new byte [ 0 ] ; return ; } if ( data . length > 249 ) { throw new IllegalArgumentException ( "data length limit exceeded" ) ; } m_length = data . length + 2 ; m_data = new byte [ data . length ] ; System . arraycopy ( data , 0 , m_data , 0 , data . length ) ;
public class ProjectApi { /** * Get a Pager of projects that were forked from the specified project . * < pre > < code > GET / projects / : id / forks < / code > < / pre > * @ param projectIdOrPath projectIdOrPath the project in the form of an Integer ( ID ) , String ( path ) , or Project instance , required * @ param itemsPerPage the number of Project instances that will be fetched per page * @ return a Pager of projects * @ throws GitLabApiException if any exception occurs */ public Pager < Project > getForks ( Object projectIdOrPath , int itemsPerPage ) throws GitLabApiException { } }
// Page through GET /projects/:id/forks; no extra query parameters.
return new Pager<Project>(this, Project.class, itemsPerPage, null, "projects", getProjectIdOrPath(projectIdOrPath), "forks");
public class TransactionTopologyBuilder { /** * Build bolt to provide the compatibility with Storm ' s ack mechanism * @ param id bolt Id * @ param bolt * @ return */ public BoltDeclarer setBoltWithAck ( String id , IRichBolt bolt , Number parallelismHint ) { } }
// Wrap the user bolt so it participates in Storm's ack mechanism, then
// register it like a normal bolt.
return setBolt(id, new AckTransactionBolt(bolt), parallelismHint);
public class ResourceUtil { /** * A simple utility method to locate the outermost contextual File reference for the specified resource . * @ param r resource instance . * @ return outermost relevant file context . */ public static File getContextFile ( Resource < ? > r ) { } }
// Walk up the resource hierarchy until a resource backed by a File is
// found; reuse the local instead of calling
// getUnderlyingResourceObject() a second time per level.
do {
    Object o = r.getUnderlyingResourceObject();
    if (o instanceof File) {
        return (File) o;
    }
} while ((r = r.getParent()) != null);
// no ancestor is file-backed
return null;
public class TabPageIndicator { /** * Set the ViewPager associate with this indicator view . * @ param view The ViewPager view . */ public void setViewPager ( @ Nullable ViewPager view ) { } }
// No-op when the same pager is set again.
if (mViewPager == view) {
    return;
}
// Detach from the previous pager, if any.
if (mViewPager != null) {
    mViewPager.removeOnPageChangeListener(this);
    final PagerAdapter oldAdapter = mViewPager.getAdapter();
    if (oldAdapter != null) {
        oldAdapter.unregisterDataSetObserver(mObserver);
    }
}
mViewPager = view;
if (mViewPager == null) {
    // fully detached: clear all tab views
    mTabContainer.removeAllViews();
    return;
}
// Attach to the new pager and sync the indicator to its current page.
final PagerAdapter adapter = mViewPager.getAdapter();
if (adapter == null) {
    throw new IllegalStateException("ViewPager does not have adapter instance.");
}
adapter.registerDataSetObserver(mObserver);
mViewPager.addOnPageChangeListener(this);
notifyDataSetChanged();
onPageSelected(mViewPager.getCurrentItem());
public class Ifc4PackageImpl { /** * < ! - - begin - user - doc - - > * < ! - - end - user - doc - - > * @ generated */ @ Override public EClass getIfcStructuralLoadStatic ( ) { } }
// Lazily resolve the EClass from the registered Ifc4 package; the
// classifier index (651) is fixed by the generated model.
if (ifcStructuralLoadStaticEClass == null) {
    ifcStructuralLoadStaticEClass = (EClass) EPackage.Registry.INSTANCE.getEPackage(Ifc4Package.eNS_URI).getEClassifiers().get(651);
}
return ifcStructuralLoadStaticEClass;
public class EthereumBlockReader {
    /**
     * Reads a raw Ethereum block into a ByteBuffer. This method is recommended if you are
     * only interested in a small part of the block and do not need the deserialization of
     * the full block, ie in case you generally skip a lot of blocks.
     * @return a ByteBuffer (set to little-endian order) containing the raw RLP-encoded
     *         block, or null if the stream has no further block to read
     * @throws IOException if the underlying stream cannot be read
     * @throws EthereumBlockReadException if the block header is truncated, the decoded
     *         block size is zero/negative/over the configured maximum, or the full block
     *         cannot be read
     */
    public ByteBuffer readRawBlock() throws IOException, EthereumBlockReadException {
        // basically an Ethereum Block is simply a RLP encoded list
        ByteBuffer result = null;
        // get size of list
        // Mark so we can rewind after peeking at the 10-byte RLP list header.
        this.in.mark(10);
        byte[] listHeader = new byte[10];
        int totalRead = 0;
        int bRead = this.in.read(listHeader);
        if (bRead == -1) {
            // no further block to read
            return result;
        } else {
            totalRead += bRead;
            // Keep reading until the full 10-byte header has been obtained; a short
            // read here means the stream ended mid-header.
            while (totalRead < 10) {
                bRead = this.in.read(listHeader, totalRead, 10 - totalRead);
                if (bRead == -1) {
                    throw new EthereumBlockReadException("Error: Not enough block data available: " + String.valueOf(bRead));
                }
                totalRead += bRead;
            }
        }
        ByteBuffer sizeByteBuffer = ByteBuffer.wrap(listHeader);
        long blockSize = EthereumUtil.getRLPListSize(sizeByteBuffer); // gets block size including indicator
        // Rewind so the header bytes are re-read as part of the full block below.
        this.in.reset();
        // check if blockSize is valid
        if (blockSize == 0) {
            throw new EthereumBlockReadException("Error: Blocksize too small");
        }
        if (blockSize < 0) {
            throw new EthereumBlockReadException("Error: This block size cannot be handled currently (larger then largest number in positive signed int)");
        }
        if (blockSize > this.maxSizeEthereumBlock) {
            throw new EthereumBlockReadException("Error: Block size is larger then defined in configuration - Please increase it if this is a valid block");
        }
        // read block
        int blockSizeInt = (int) (blockSize);
        byte[] fullBlock = new byte[blockSizeInt];
        int totalByteRead = 0;
        int readByte;
        // Loop until the whole block is buffered (read may return fewer bytes per call).
        while ((readByte = this.in.read(fullBlock, totalByteRead, blockSizeInt - totalByteRead)) > -1) {
            totalByteRead += readByte;
            if (totalByteRead >= blockSize) {
                break;
            }
        }
        if (totalByteRead != blockSize) {
            throw new EthereumBlockReadException("Error: Could not read full block");
        }
        if (!(this.useDirectBuffer)) {
            result = ByteBuffer.wrap(fullBlock);
        } else {
            // Direct-buffer mode reuses one preallocated buffer to avoid per-block
            // allocation; NOTE(review): this makes the returned buffer invalid after
            // the next call — presumably callers consume it immediately. Confirm.
            preAllocatedDirectByteBuffer.clear(); // clear out old bytebuffer
            preAllocatedDirectByteBuffer.limit(fullBlock.length); // limit the bytebuffer
            result = preAllocatedDirectByteBuffer;
            result.put(fullBlock);
            result.flip(); // put in read mode
        }
        result.order(ByteOrder.LITTLE_ENDIAN);
        return result;
    }
}
public class UNIXSocket { /** * Creates a new , unbound , " strict " { @ link UNIXSocket } . * This call uses an implementation that tries to be closer to the specification than * { @ link # newInstance ( ) } , at least for some cases . * @ return A new , unbound socket . */ public static UNIXSocket newStrictInstance ( ) throws IOException { } }
final UNIXSocketImpl impl = new UNIXSocketImpl ( ) ; UNIXSocket instance = new UNIXSocket ( impl ) ; instance . impl = impl ; return instance ;
public class AWSOrganizationsClient { /** * This action is available if all of the following are true : * < ul > * < li > * You are authorized to create accounts in the AWS GovCloud ( US ) Region . For more information on the AWS GovCloud * ( US ) Region , see the < a href = " http : / / docs . aws . amazon . com / govcloud - us / latest / UserGuide / welcome . html " > < i > AWS * GovCloud User Guide < / i > . < / a > * < / li > * < li > * You already have an account in the AWS GovCloud ( US ) Region that is associated with your master account in the * commercial Region . * < / li > * < li > * You call this action from the master account of your organization in the commercial Region . * < / li > * < li > * You have the < code > organizations : CreateGovCloudAccount < / code > permission . AWS Organizations creates the required * service - linked role named < code > AWSServiceRoleForOrganizations < / code > . For more information , see < a href = * " http : / / docs . aws . amazon . com / organizations / latest / userguide / orgs _ integrate _ services . html # orgs _ integrate _ services - using _ slrs " * > AWS Organizations and Service - Linked Roles < / a > in the < i > AWS Organizations User Guide < / i > . * < / li > * < / ul > * AWS automatically enables AWS CloudTrail for AWS GovCloud ( US ) accounts , but you should also do the following : * < ul > * < li > * Verify that AWS CloudTrail is enabled to store logs . * < / li > * < li > * Create an S3 bucket for AWS CloudTrail log storage . * For more information , see < a * href = " http : / / docs . aws . amazon . com / govcloud - us / latest / UserGuide / verifying - cloudtrail . html " > Verifying AWS CloudTrail * Is Enabled < / a > in the < i > AWS GovCloud User Guide < / i > . * < / li > * < / ul > * You call this action from the master account of your organization in the commercial Region to create a standalone * AWS account in the AWS GovCloud ( US ) Region . 
After the account is created , the master account of an organization * in the AWS GovCloud ( US ) Region can invite it to that organization . For more information on inviting standalone * accounts in the AWS GovCloud ( US ) to join an organization , see < a * href = " http : / / docs . aws . amazon . com / govcloud - us / latest / UserGuide / govcloud - organizations . html " > AWS Organizations < / a > * in the < i > AWS GovCloud User Guide . < / i > * Calling < code > CreateGovCloudAccount < / code > is an asynchronous request that AWS performs in the background . * Because < code > CreateGovCloudAccount < / code > operates asynchronously , it can return a successful completion message * even though account initialization might still be in progress . You might need to wait a few minutes before you * can successfully access the account . To check the status of the request , do one of the following : * < ul > * < li > * Use the < code > OperationId < / code > response element from this operation to provide as a parameter to the * < a > DescribeCreateAccountStatus < / a > operation . * < / li > * < li > * Check the AWS CloudTrail log for the < code > CreateAccountResult < / code > event . For information on using AWS * CloudTrail with Organizations , see < a * href = " http : / / docs . aws . amazon . com / organizations / latest / userguide / orgs _ monitoring . html " > Monitoring the Activity in * Your Organization < / a > in the < i > AWS Organizations User Guide . < / i > * < / li > * < / ul > * When you call the < code > CreateGovCloudAccount < / code > action , you create two accounts : a standalone account in the * AWS GovCloud ( US ) Region and an associated account in the commercial Region for billing and support purposes . The * account in the commercial Region is automatically a member of the organization whose credentials made the * request . Both accounts are associated with the same email address . 
* A role is created in the new account in the commercial Region that allows the master account in the organization * in the commercial Region to assume it . An AWS GovCloud ( US ) account is then created and associated with the * commercial account that you just created . A role is created in the new AWS GovCloud ( US ) account that can be * assumed by the AWS GovCloud ( US ) account that is associated with the master account of the commercial * organization . For more information and to view a diagram that explains how account access works , see < a * href = " http : / / docs . aws . amazon . com / govcloud - us / latest / UserGuide / govcloud - organizations . html " > AWS Organizations < / a > * in the < i > AWS GovCloud User Guide . < / i > * For more information about creating accounts , see < a * href = " https : / / docs . aws . amazon . com / organizations / latest / userguide / orgs _ manage _ accounts _ create . html " > Creating an * AWS Account in Your Organization < / a > in the < i > AWS Organizations User Guide . < / i > * < important > * < ul > * < li > * When you create an account in an organization using the AWS Organizations console , API , or CLI commands , the * information required for the account to operate as a standalone account , such as a payment method and signing the * end user license agreement ( EULA ) is < i > not < / i > automatically collected . If you must remove an account from your * organization later , you can do so only after you provide the missing information . Follow the steps at < a href = * " http : / / docs . aws . amazon . com / organizations / latest / userguide / orgs _ manage _ accounts _ remove . html # leave - without - all - info " * > To leave an organization as a member account < / a > in the < i > AWS Organizations User Guide . < / i > * < / li > * < li > * If you get an exception that indicates that you exceeded your account limits for the organization , contact < a * href = " https : / / console . aws . 
amazon . com / support / home # / " > AWS Support < / a > . * < / li > * < li > * If you get an exception that indicates that the operation failed because your organization is still initializing , * wait one hour and then try again . If the error persists , contact < a * href = " https : / / console . aws . amazon . com / support / home # / " > AWS Support < / a > . * < / li > * < li > * Using < code > CreateGovCloudAccount < / code > to create multiple temporary accounts isn ' t recommended . You can only * close an account from the AWS Billing and Cost Management console , and you must be signed in as the root user . * For information on the requirements and process for closing an account , see < a * href = " http : / / docs . aws . amazon . com / organizations / latest / userguide / orgs _ manage _ accounts _ close . html " > Closing an AWS * Account < / a > in the < i > AWS Organizations User Guide < / i > . * < / li > * < / ul > * < / important > < note > * When you create a member account with this operation , you can choose whether to create the account with the * < b > IAM User and Role Access to Billing Information < / b > switch enabled . If you enable it , IAM users and roles that * have appropriate permissions can view billing information for the account . If you disable it , only the account * root user can access billing information . For information about how to disable this switch for an account , see < a * href = " https : / / docs . aws . amazon . com / awsaccountbilling / latest / aboutv2 / grantaccess . html " > Granting Access to Your * Billing Information and Tools < / a > . * < / note > * @ param createGovCloudAccountRequest * @ return Result of the CreateGovCloudAccount operation returned by the service . * @ throws AccessDeniedException * You don ' t have permissions to perform the requested operation . The user or role that is making the * request must have at least one IAM permissions policy attached that grants the required permissions . 
For * more information , see < a href = " https : / / docs . aws . amazon . com / IAM / latest / UserGuide / access . html " > Access * Management < / a > in the < i > IAM User Guide < / i > . * @ throws AWSOrganizationsNotInUseException * Your account isn ' t a member of an organization . To make this request , you must use the credentials of an * account that belongs to an organization . * @ throws ConcurrentModificationException * The target of the operation is currently being modified by a different request . Try again later . * @ throws ConstraintViolationException * Performing this operation violates a minimum or maximum value limit . For example , attempting to remove * the last service control policy ( SCP ) from an OU or root , inviting or creating too many accounts to the * organization , or attaching too many policies to an account , OU , or root . This exception includes a reason * that contains additional information about the violated limit . < / p > * Some of the reasons in the following list might not be applicable to this specific API or operation : * < ul > * < li > * ACCOUNT _ NUMBER _ LIMIT _ EXCEEDED : You attempted to exceed the limit on the number of accounts in an * organization . If you need more accounts , contact < a * href = " https : / / console . aws . amazon . com / support / home # / " > AWS Support < / a > to request an increase in your * limit . * Or the number of invitations that you tried to send would cause you to exceed the limit of accounts in * your organization . Send fewer invitations or contact AWS Support to request an increase in the number of * accounts . * < note > * Deleted and closed accounts still count toward your limit . * < / note > < important > * If you get receive this exception when running a command immediately after creating the organization , * wait one hour and try again . If after an hour it continues to fail with this error , contact < a * href = " https : / / console . aws . amazon . 
com / support / home # / " > AWS Support < / a > . * < / important > < / li > * < li > * HANDSHAKE _ RATE _ LIMIT _ EXCEEDED : You attempted to exceed the number of handshakes that you can send in one * day . * < / li > * < li > * OU _ NUMBER _ LIMIT _ EXCEEDED : You attempted to exceed the number of OUs that you can have in an organization . * < / li > * < li > * OU _ DEPTH _ LIMIT _ EXCEEDED : You attempted to create an OU tree that is too many levels deep . * < / li > * < li > * ORGANIZATION _ NOT _ IN _ ALL _ FEATURES _ MODE : You attempted to perform an operation that requires the * organization to be configured to support all features . An organization that supports only consolidated * billing features can ' t perform this operation . * < / li > * < li > * POLICY _ NUMBER _ LIMIT _ EXCEEDED . You attempted to exceed the number of policies that you can have in an * organization . * < / li > * < li > * MAX _ POLICY _ TYPE _ ATTACHMENT _ LIMIT _ EXCEEDED : You attempted to exceed the number of policies of a certain * type that can be attached to an entity at one time . * < / li > * < li > * MIN _ POLICY _ TYPE _ ATTACHMENT _ LIMIT _ EXCEEDED : You attempted to detach a policy from an entity that would * cause the entity to have fewer than the minimum number of policies of a certain type required . * < / li > * < li > * ACCOUNT _ CANNOT _ LEAVE _ WITHOUT _ EULA : You attempted to remove an account from the organization that doesn ' t * yet have enough information to exist as a standalone account . This account requires you to first agree to * the AWS Customer Agreement . Follow the steps at < a href = * " http : / / docs . aws . amazon . com / organizations / latest / userguide / orgs _ manage _ accounts _ remove . html # leave - without - all - info " * > To leave an organization when all required account information has not yet been provided < / a > in the * < i > AWS Organizations User Guide < / i > . 
* < / li > * < li > * ACCOUNT _ CANNOT _ LEAVE _ WITHOUT _ PHONE _ VERIFICATION : You attempted to remove an account from the organization * that doesn ' t yet have enough information to exist as a standalone account . This account requires you to * first complete phone verification . Follow the steps at < a href = * " http : / / docs . aws . amazon . com / organizations / latest / userguide / orgs _ manage _ accounts _ remove . html # leave - without - all - info " * > To leave an organization when all required account information has not yet been provided < / a > in the * < i > AWS Organizations User Guide < / i > . * < / li > * < li > * MASTER _ ACCOUNT _ PAYMENT _ INSTRUMENT _ REQUIRED : To create an organization with this master account , you first * must associate a valid payment instrument , such as a credit card , with the account . Follow the steps at * < a href = * " http : / / docs . aws . amazon . com / organizations / latest / userguide / orgs _ manage _ accounts _ remove . html # leave - without - all - info " * > To leave an organization when all required account information has not yet been provided < / a > in the * < i > AWS Organizations User Guide < / i > . * < / li > * < li > * MEMBER _ ACCOUNT _ PAYMENT _ INSTRUMENT _ REQUIRED : To complete this operation with this member account , you * first must associate a valid payment instrument , such as a credit card , with the account . Follow the * steps at < a href = * " http : / / docs . aws . amazon . com / organizations / latest / userguide / orgs _ manage _ accounts _ remove . html # leave - without - all - info " * > To leave an organization when all required account information has not yet been provided < / a > in the * < i > AWS Organizations User Guide < / i > . * < / li > * < li > * ACCOUNT _ CREATION _ RATE _ LIMIT _ EXCEEDED : You attempted to exceed the number of accounts that you can create * in one day . 
* < / li > * < li > * MASTER _ ACCOUNT _ ADDRESS _ DOES _ NOT _ MATCH _ MARKETPLACE : To create an account in this organization , you first * must migrate the organization ' s master account to the marketplace that corresponds to the master * account ' s address . For example , accounts with India addresses must be associated with the AISPL * marketplace . All accounts in an organization must be associated with the same marketplace . * < / li > * < li > * MASTER _ ACCOUNT _ MISSING _ CONTACT _ INFO : To complete this operation , you must first provide contact a valid * address and phone number for the master account . Then try the operation again . * < / li > * < li > * MASTER _ ACCOUNT _ NOT _ GOVCLOUD _ ENABLED : To complete this operation , the master account must have an * associated account in the AWS GovCloud ( US - West ) Region . For more information , see < a * href = " http : / / docs . aws . amazon . com / govcloud - us / latest / UserGuide / govcloud - organizations . html " > AWS * Organizations < / a > in the < i > AWS GovCloud User Guide . < / i > * < / li > * @ throws InvalidInputException * The requested operation failed because you provided invalid values for one or more of the request * parameters . This exception includes a reason that contains additional information about the violated * limit : < / p > < note > * Some of the reasons in the following list might not be applicable to this specific API or operation : * < / note > * < ul > * < li > * IMMUTABLE _ POLICY : You specified a policy that is managed by AWS and can ' t be modified . * < / li > * < li > * INPUT _ REQUIRED : You must include a value for all required parameters . * < / li > * < li > * INVALID _ ENUM : You specified a value that isn ' t valid for that parameter . * < / li > * < li > * INVALID _ FULL _ NAME _ TARGET : You specified a full name that contains invalid characters . 
* < / li > * < li > * INVALID _ LIST _ MEMBER : You provided a list to a parameter that contains at least one invalid value . * < / li > * < li > * INVALID _ PARTY _ TYPE _ TARGET : You specified the wrong type of entity ( account , organization , or email ) as a * party . * < / li > * < li > * INVALID _ PAGINATION _ TOKEN : Get the value for the < code > NextToken < / code > parameter from the response to a * previous call of the operation . * < / li > * < li > * INVALID _ PATTERN : You provided a value that doesn ' t match the required pattern . * < / li > * < li > * INVALID _ PATTERN _ TARGET _ ID : You specified a policy target ID that doesn ' t match the required pattern . * < / li > * < li > * INVALID _ ROLE _ NAME : You provided a role name that isn ' t valid . A role name can ' t begin with the reserved * prefix < code > AWSServiceRoleFor < / code > . * < / li > * < li > * INVALID _ SYNTAX _ ORGANIZATION _ ARN : You specified an invalid Amazon Resource Name ( ARN ) for the * organization . * < / li > * < li > * INVALID _ SYNTAX _ POLICY _ ID : You specified an invalid policy ID . * < / li > * < li > * MAX _ FILTER _ LIMIT _ EXCEEDED : You can specify only one filter parameter for the operation . * < / li > * < li > * MAX _ LENGTH _ EXCEEDED : You provided a string parameter that is longer than allowed . * < / li > * < li > * MAX _ VALUE _ EXCEEDED : You provided a numeric parameter that has a larger value than allowed . * < / li > * < li > * MIN _ LENGTH _ EXCEEDED : You provided a string parameter that is shorter than allowed . * < / li > * < li > * MIN _ VALUE _ EXCEEDED : You provided a numeric parameter that has a smaller value than allowed . * < / li > * < li > * MOVING _ ACCOUNT _ BETWEEN _ DIFFERENT _ ROOTS : You can move an account only between entities in the same root . * < / li > * @ throws FinalizingOrganizationException * AWS Organizations couldn ' t perform the operation because your organization hasn ' t finished initializing . 
* This can take up to an hour . Try again later . If after one hour you continue to receive this error , * contact < a href = " https : / / console . aws . amazon . com / support / home # / " > AWS Support < / a > . * @ throws ServiceException * AWS Organizations can ' t complete your request because of an internal service error . Try again later . * @ throws TooManyRequestsException * You ' ve sent too many requests in too short a period of time . The limit helps protect against * denial - of - service attacks . Try again later . < / p > * For information on limits that affect Organizations , see < a * href = " https : / / docs . aws . amazon . com / organizations / latest / userguide / orgs _ reference _ limits . html " > Limits of * AWS Organizations < / a > in the < i > AWS Organizations User Guide < / i > . * @ throws UnsupportedAPIEndpointException * This action isn ' t available in the current Region . * @ sample AWSOrganizations . CreateGovCloudAccount * @ see < a href = " http : / / docs . aws . amazon . com / goto / WebAPI / organizations - 2016-11-28 / CreateGovCloudAccount " * target = " _ top " > AWS API Documentation < / a > */ @ Override public CreateGovCloudAccountResult createGovCloudAccount ( CreateGovCloudAccountRequest request ) { } }
request = beforeClientExecution ( request ) ; return executeCreateGovCloudAccount ( request ) ;
public class Diagram { /** * Add an additional SSExtension * @ param ssExt the ssextension to set */ public boolean addSsextension ( String ssExt ) { } }
if ( this . ssextensions == null ) { this . ssextensions = new ArrayList < String > ( ) ; } return this . ssextensions . add ( ssExt ) ;
public class HtmlUtils {
    /**
     * Fake Class#getSimpleName logic: returns the portion of the given class name after
     * the final '.', or the name unchanged when it contains no package separator.
     */
    static String getClassSimpleName(String className) {
        int dot = className.lastIndexOf('.');
        return (dot == -1) ? className : className.substring(dot + 1);
    }
}
public class GetRecordsResult { /** * The stream records from the shard , which were retrieved using the shard iterator . * @ param records * The stream records from the shard , which were retrieved using the shard iterator . */ public void setRecords ( java . util . Collection < Record > records ) { } }
if ( records == null ) { this . records = null ; return ; } this . records = new java . util . ArrayList < Record > ( records ) ;
public class EmailValidator { /** * { @ inheritDoc } check if given string is a valid mail . * @ see javax . validation . ConstraintValidator # isValid ( java . lang . Object , * javax . validation . ConstraintValidatorContext ) */ @ Override public final boolean isValid ( final String pvalue , final ConstraintValidatorContext pcontext ) { } }
if ( StringUtils . isEmpty ( pvalue ) ) { return true ; } if ( pvalue . length ( ) > LENGTH_MAIL ) { // Email is to long , but that ' s handled by size annotation return true ; } if ( ! StringUtils . equals ( pvalue , StringUtils . trim ( pvalue ) ) ) { // mail contains leading or trailing space ( s ) , that ' s not correct return false ; } return org . apache . commons . validator . routines . EmailValidator . getInstance ( ) . isValid ( pvalue ) ;
public class Table {
    /**
     * Given a key, return an approximate byte offset in the file where
     * the data for that key begins (or would begin if the key were
     * present in the file). The returned value is in terms of file
     * bytes, and so includes effects like compression of the underlying data.
     * For example, the approximate offset of the last key in the table will
     * be close to the file length.
     */
    public long getApproximateOffsetOf(Slice key) {
        BlockIterator iterator = indexBlock.iterator();
        // Position the index iterator at the entry covering the target key.
        iterator.seek(key);
        if (iterator.hasNext()) {
            // The index entry's value encodes the handle of the data block that would
            // contain the key; that block's file offset is the approximation.
            BlockHandle blockHandle = BlockHandle.readBlockHandle(iterator.next().getValue().input());
            return blockHandle.getOffset();
        }
        // key is past the last key in the file. Approximate the offset
        // by returning the offset of the metaindex block (which is
        // right near the end of the file).
        return metaindexBlockHandle.getOffset();
    }
}
public class PoolStopResizeOptions { /** * Set a timestamp indicating the last modified time of the resource known to the client . The operation will be performed only if the resource on the service has not been modified since the specified time . * @ param ifUnmodifiedSince the ifUnmodifiedSince value to set * @ return the PoolStopResizeOptions object itself . */ public PoolStopResizeOptions withIfUnmodifiedSince ( DateTime ifUnmodifiedSince ) { } }
if ( ifUnmodifiedSince == null ) { this . ifUnmodifiedSince = null ; } else { this . ifUnmodifiedSince = new DateTimeRfc1123 ( ifUnmodifiedSince ) ; } return this ;
public class StorageAccountsInner { /** * Checks that the storage account name is valid and is not already in use . * @ param name The storage account name . * @ throws IllegalArgumentException thrown if parameters fail the validation * @ return the observable to the CheckNameAvailabilityResultInner object */ public Observable < CheckNameAvailabilityResultInner > checkNameAvailabilityAsync ( String name ) { } }
return checkNameAvailabilityWithServiceResponseAsync ( name ) . map ( new Func1 < ServiceResponse < CheckNameAvailabilityResultInner > , CheckNameAvailabilityResultInner > ( ) { @ Override public CheckNameAvailabilityResultInner call ( ServiceResponse < CheckNameAvailabilityResultInner > response ) { return response . body ( ) ; } } ) ;
public class DynamicOutputBuffer { /** * Gets the buffer that holds the byte at the given absolute position . * Automatically adds new internal buffers if the position lies outside * the current range of all internal buffers . * @ param position the position * @ return the buffer at the requested position */ protected ByteBuffer getBuffer ( int position ) { } }
int n = position / _bufferSize ; while ( n >= _buffers . size ( ) ) { addNewBuffer ( ) ; } return _buffers . get ( n ) ;
public class RtfDestinationFontTable { /** * Create a font via the < code > FontFactory < / code > * @ param fontName The font name to create * @ return The created < code > Font < / code > object * @ since 2.0.8 */ private Font createfont ( String fontName ) { } }
Font f1 = null ; int pos = - 1 ; do { f1 = FontFactory . getFont ( fontName ) ; if ( f1 . getBaseFont ( ) != null ) break ; // found a font , exit the do / while pos = fontName . lastIndexOf ( ' ' ) ; // find the last space if ( pos > 0 ) { fontName = fontName . substring ( 0 , pos ) ; // truncate it to the last space } } while ( pos > 0 ) ; return f1 ;
public class HttpUtils { /** * Convert D2 URL template into a string used for throttling limiter * Valid : * d2 : / / host / $ { resource - id } * Invalid : * d2 : / / host $ { resource - id } , because we cannot differentiate the host */ public static String createR2ClientLimiterKey ( Config config ) { } }
String urlTemplate = config . getString ( HttpConstants . URL_TEMPLATE ) ; try { String escaped = URIUtil . encodeQuery ( urlTemplate ) ; URI uri = new URI ( escaped ) ; if ( uri . getHost ( ) == null ) throw new RuntimeException ( "Cannot get host part from uri" + urlTemplate ) ; String key = uri . getScheme ( ) + "/" + uri . getHost ( ) ; if ( uri . getPort ( ) > 0 ) { key = key + "/" + uri . getPort ( ) ; } log . info ( "Get limiter key [" + key + "]" ) ; return key ; } catch ( Exception e ) { throw new RuntimeException ( "Cannot create R2 limiter key" , e ) ; }
public class BlockDataHandler {
    /**
     * Removes the custom data stored at the {@link BlockPos} for the specified identifier
     * and eventually sends the update to clients watching the chunk.
     * @param <T> the generic type
     * @param identifier the identifier of the data to remove
     * @param world the world
     * @param pos the position of the block
     * @param sendToClients whether to propagate the removal to watching clients
     */
    public static <T> void removeData(String identifier, IBlockAccess world, BlockPos pos, boolean sendToClients) {
        // Removal is implemented as storing a null payload for the identifier.
        setData(identifier, world, pos, null, sendToClients);
    }
}
public class ParseUtils {
    /**
     * Get an exception reporting an unexpected XML element.
     * @param reader the stream reader positioned at the unexpected element
     * @param possible the element names that would have been valid at this position
     * @return the exception, wrapped with validation metadata (element name and alternatives)
     */
    public static XMLStreamException unexpectedElement(final XMLExtendedStreamReader reader, Set<String> possible) {
        final XMLStreamException ex = ControllerLogger.ROOT_LOGGER.unexpectedElement(reader.getName(), asStringList(possible), reader.getLocation());
        // Wrap the logger-produced exception so callers can programmatically inspect
        // the validation error (offending element plus the valid alternatives).
        return new XMLStreamValidationException(ex.getMessage(), ValidationError.from(ex, ErrorType.UNEXPECTED_ELEMENT).element(reader.getName()).alternatives(possible), ex);
    }
}
public class Selenium2Script {
    /**
     * Converts Selenium IDE test scripts (html) into SIT-WT test scripts (csv).
     * @return 0 on normal termination; 2 if opening a converted script failed
     */
    public int execute() {
        int ret = 0;
        // Multiple script directories may be configured as a comma-separated list.
        for (String seleniumScriptDir : seleniumScriptDirs.split(",")) {
            File scriptDir = new File(seleniumScriptDir);
            if (!scriptDir.exists()) {
                continue;
            }
            // Search subdirectories unless the configured directory is the current one.
            boolean recursive = !".".equals(seleniumScriptDir);
            for (File seleniumScript : FileUtils.listFiles(scriptDir, new String[] { "html" }, recursive)) {
                File sitScript = convert(seleniumScript);
                // Keep a backup of the original Selenium IDE script after conversion.
                backup(seleniumScript);
                if (isOpenScript()) {
                    try {
                        // Open the converted script with the platform's default application.
                        Desktop.getDesktop().open(sitScript);
                    } catch (IOException e) {
                        log.error("open.script.error", e);
                        ret = 2;
                    }
                }
            }
        }
        return ret;
    }
}
public class Config { /** * Read config object stored in YAML format from < code > String < / code > * @ param content of config * @ return config * @ throws IOException error */ public static Config fromYAML ( String content ) throws IOException { } }
ConfigSupport support = new ConfigSupport ( ) ; return support . fromYAML ( content , Config . class ) ;
public class CPDAvailabilityEstimateUtil {
    /**
     * Returns the cpd availability estimate where CProductId = &#63; or throws a
     * {@link NoSuchCPDAvailabilityEstimateException} if it could not be found.
     * @param CProductId the c product ID
     * @return the matching cpd availability estimate
     * @throws NoSuchCPDAvailabilityEstimateException if a matching cpd availability estimate could not be found
     */
    public static CPDAvailabilityEstimate findByCProductId(long CProductId) throws com.liferay.commerce.exception.NoSuchCPDAvailabilityEstimateException {
        // Static facade: delegate to the configured persistence implementation.
        return getPersistence().findByCProductId(CProductId);
    }
}
public class JoinListener { /** * Automatically create a join from a node and a join class . * This will automatically get the left and right hand refs * and will construct a new join specified by the type using reflection . * It will also add an parsed location to the join . * @ node * @ type00 */ private void join ( ParserRuleContext ctx , Class < ? extends Join > type ) { } }
QueryNode left = relationChain . get ( relationIdx ) ; QueryNode right = relationChain . get ( relationIdx + 1 ) ; try { Constructor < ? extends Join > c = type . getConstructor ( QueryNode . class ) ; Join newJoin = c . newInstance ( right ) ; left . addOutgoingJoin ( addParsedLocation ( ctx , newJoin ) ) ; } catch ( NoSuchMethodException ex ) { log . error ( null , ex ) ; } catch ( InstantiationException ex ) { log . error ( null , ex ) ; } catch ( IllegalAccessException ex ) { log . error ( null , ex ) ; } catch ( InvocationTargetException ex ) { log . error ( null , ex ) ; }
public class InodeLockManager {
    /**
     * Attempts to acquire an inode lock.
     * @param inodeId the inode id to try locking
     * @param mode the mode to lock in
     * @return either an empty optional (the lock was not acquired), or a lock resource
     *         which must be closed to release the lock
     */
    public Optional<LockResource> tryLockInode(Long inodeId, LockMode mode) {
        // Delegates to the lock pool; presumably non-blocking given the try* naming —
        // confirm against the pool's tryGet contract.
        return mInodeLocks.tryGet(inodeId, mode);
    }
}
public class JSONTokener {

    /**
     * Get the hex value of a character (base16).
     *
     * @param c a character between '0' and '9' or between 'A' and 'F' or
     *          between 'a' and 'f'
     * @return an int between 0 and 15, or -1 if c was not a hex digit
     */
    public static int dehexchar(char c) {
        // Classify the character with a single if/else chain and one exit point.
        final int value;
        if (c >= '0' && c <= '9') {
            value = c - '0';
        } else if (c >= 'A' && c <= 'F') {
            value = c - 'A' + 10;
        } else if (c >= 'a' && c <= 'f') {
            value = c - 'a' + 10;
        } else {
            value = -1;
        }
        return value;
    }
}
public class CPDefinitionOptionValueRelLocalServiceWrapper {

    /**
     * Deletes the cp definition option value rel from the database. Also
     * notifies the appropriate model listeners.
     *
     * @param cpDefinitionOptionValueRel the cp definition option value rel
     * @return the cp definition option value rel that was removed
     * @throws PortalException if the wrapped service fails to delete the entity
     */
    @ Override public com . liferay . commerce . product . model . CPDefinitionOptionValueRel deleteCPDefinitionOptionValueRel ( com . liferay . commerce . product . model . CPDefinitionOptionValueRel cpDefinitionOptionValueRel ) throws com . liferay . portal . kernel . exception . PortalException {
        // Wrapper pattern: forward unchanged to the wrapped local service.
        return _cpDefinitionOptionValueRelLocalService . deleteCPDefinitionOptionValueRel ( cpDefinitionOptionValueRel ) ;
    }
}
public class BookmarksApi { /** * List bookmarks A list of your character & # 39 ; s personal bookmarks - - - This * route is cached for up to 3600 seconds SSO Scope : * esi - bookmarks . read _ character _ bookmarks . v1 * @ param characterId * An EVE character ID ( required ) * @ param datasource * The server name you would like data from ( optional , default to * tranquility ) * @ param ifNoneMatch * ETag from a previous request . A 304 will be returned if this * matches the current ETag ( optional ) * @ param page * Which page of results to return ( optional , default to 1) * @ param token * Access token to use if unable to set a header ( optional ) * @ return ApiResponse & lt ; List & lt ; CharacterBookmarksResponse & gt ; & gt ; * @ throws ApiException * If fail to call the API , e . g . server error or cannot * deserialize the response body */ public ApiResponse < List < CharacterBookmarksResponse > > getCharactersCharacterIdBookmarksWithHttpInfo ( Integer characterId , String datasource , String ifNoneMatch , Integer page , String token ) throws ApiException { } }
com . squareup . okhttp . Call call = getCharactersCharacterIdBookmarksValidateBeforeCall ( characterId , datasource , ifNoneMatch , page , token , null ) ; Type localVarReturnType = new TypeToken < List < CharacterBookmarksResponse > > ( ) { } . getType ( ) ; return apiClient . execute ( call , localVarReturnType ) ;
public class ConcurrentMultiCache { /** * Places the specified item in the cache . It will not replace an existing * item in the cache . Instead , if an item already exists in the cache , it * will make sure that item is cached across all identifiers and then return * the cached item . * @ param item the item to be cached * @ return whatever item is in the cache after this operation */ public T cache ( T item ) { } }
HashMap < String , Object > keys ; if ( item == null ) { throw new NullPointerException ( "Multi caches may not have null values." ) ; } keys = getKeys ( item ) ; synchronized ( this ) { item = getCurrent ( item ) ; for ( String key : caches . keySet ( ) ) { ConcurrentCache < Object , T > cache = caches . get ( key ) ; cache . put ( keys . get ( key ) , item ) ; } return item ; }
public class InternalSARLParser {

    /**
     * Entry rule for XPostfixOperation.
     *
     * InternalSARL.g:13146:1: entryRuleXPostfixOperation returns [EObject current = null] :
     *     iv_ruleXPostfixOperation = ruleXPostfixOperation EOF ;
     *
     * NOTE(review): ANTLR-generated parser code — the exact statement order
     * (pushFollow / _fsp-- / backtracking checks) is significant; do not hand-edit.
     */
    public final EObject entryRuleXPostfixOperation ( ) throws RecognitionException {
        EObject current = null ;
        EObject iv_ruleXPostfixOperation = null ;
        try {
            // InternalSARL . g : 13146:58 : ( iv _ ruleXPostfixOperation = ruleXPostfixOperation EOF )
            // InternalSARL . g : 13147:2 : iv _ ruleXPostfixOperation = ruleXPostfixOperation EOF
            {
                // Only build AST nodes when not speculatively backtracking.
                if ( state . backtracking == 0 ) {
                    newCompositeNode ( grammarAccess . getXPostfixOperationRule ( ) ) ;
                }
                pushFollow ( FOLLOW_1 ) ;
                iv_ruleXPostfixOperation = ruleXPostfixOperation ( ) ;
                state . _fsp -- ;
                // Bail out early on failure so backtracking can rewind.
                if ( state . failed ) return current ;
                if ( state . backtracking == 0 ) {
                    current = iv_ruleXPostfixOperation ;
                }
                // The entry rule must consume the remaining input up to EOF.
                match ( input , EOF , FOLLOW_2 ) ;
                if ( state . failed ) return current ;
            }
        } catch ( RecognitionException re ) {
            // Standard ANTLR error recovery: resynchronize and keep parsing.
            recover ( input , re ) ;
            appendSkippedTokens ( ) ;
        } finally {
        }
        return current ;
    }
}