signature
stringlengths
43
39.1k
implementation
stringlengths
0
450k
public class Utilities {

    /**
     * Replaces every occurrence of a non-alphanumeric character in
     * {@code str} with the supplied character.
     *
     * @param str the input string; must not be {@code null}
     * @param subst the replacement character
     * @return a new string where each character that is not a letter or digit
     *         (per {@link Character#isLetterOrDigit(char)}) has been replaced
     *         by {@code subst}
     */
    public static String replaceNonAlphanumeric(final String str, final char subst) {
        // StringBuilder instead of the original StringBuffer: the buffer is
        // method-local, so synchronization buys nothing.
        final StringBuilder ret = new StringBuilder(str.length());
        for (final char testChar : str.toCharArray()) {
            ret.append(Character.isLetterOrDigit(testChar) ? testChar : subst);
        }
        return ret.toString();
    }
}
public class CommerceOrderItemPersistenceImpl {

    /**
     * Clears the cache for all commerce order items.
     *
     * <p>Both the {@code EntityCache} and the {@code FinderCache} are cleared:
     * the entity cache entries plus the entity, paginated-list, and
     * unpaginated-list finder caches, so subsequent lookups hit the database.
     */
    @Override
    public void clearCache() {
        entityCache.clearCache(CommerceOrderItemImpl.class);
        finderCache.clearCache(FINDER_CLASS_NAME_ENTITY);
        finderCache.clearCache(FINDER_CLASS_NAME_LIST_WITH_PAGINATION);
        finderCache.clearCache(FINDER_CLASS_NAME_LIST_WITHOUT_PAGINATION);
    }
}
public class Utils { /** * Generates a random string with the specified length . * @ param length * @ return */ public static String generateNonce ( int length ) { } }
if ( length < 0 ) { if ( tc . isDebugEnabled ( ) ) { Tr . debug ( tc , "Negative length provided. Will default to nonce of length " + NONCE_LENGTH ) ; } length = NONCE_LENGTH ; } StringBuilder randomString = new StringBuilder ( ) ; SecureRandom r = new SecureRandom ( ) ; for ( int i = 0 ; i < length ; i ++ ) { int index = r . nextInt ( chars . length ) ; randomString . append ( chars [ index ] ) ; } return randomString . toString ( ) ;
public class Saved { /** * / / @ Override */ public Parsed < State > newParsed ( ) { } }
Parsed < State > ps = new Parsed < State > ( new State ( ) ) ; return ps ;
public class CommerceNotificationQueueEntryPersistenceImpl {

    /**
     * Removes all the commerce notification queue entries where sent = &#63; from the database.
     *
     * @param sent the sent flag value to match
     */
    @Override
    public void removeBySent(boolean sent) {
        // Fetch every matching entry (no pagination) and delete one by one so
        // per-entity remove hooks and cache eviction run for each row.
        for (CommerceNotificationQueueEntry commerceNotificationQueueEntry :
                findBySent(sent, QueryUtil.ALL_POS, QueryUtil.ALL_POS, null)) {
            remove(commerceNotificationQueueEntry);
        }
    }
}
public class StringUtil {

    /**
     * Join a collection of strings by a separator.
     *
     * @param strings iterator of objects to join (each joined via {@code toString()})
     * @param sep string to place between strings
     * @return joined string; the empty string when the iterator is empty
     */
    // NOTE(review): raw Iterator type; Iterator<?> would avoid raw-type
    // warnings — confirm no callers rely on the raw signature before changing.
    public static String join(Iterator strings, String sep) {
        if (!strings.hasNext())
            return "";
        String start = strings.next().toString();
        if (!strings.hasNext()) // only one, avoid builder
            return start;
        // borrowBuilder()/releaseBuilder() pool StringBuilders to cut
        // allocation churn; releaseBuilder also returns the built string.
        StringBuilder sb = StringUtil.borrowBuilder().append(start);
        while (strings.hasNext()) {
            sb.append(sep);
            sb.append(strings.next());
        }
        return StringUtil.releaseBuilder(sb);
    }
}
public class CampaignGroupPerformanceTarget {

    /**
     * Gets the performanceTarget value for this CampaignGroupPerformanceTarget.
     *
     * @return performanceTarget the main configuration of the performance
     *         target. <span class="constraint Required">This field is required
     *         and should not be {@code null} when it is contained within
     *         {@link Operator}s: ADD, SET.</span>
     */
    public com.google.api.ads.adwords.axis.v201809.cm.PerformanceTarget getPerformanceTarget() {
        return performanceTarget;
    }
}
public class Parser { /** * default */ private Node parseDefault ( ) { } }
expect ( Default . class ) ; Node when = new CaseNode . When ( ) ; when . setValue ( "default" ) ; when . setBlock ( this . parseBlockExpansion ( ) ) ; return when ;
public class QueryExecuter { /** * Extracts the querible interactions from the elements . * @ param elements BioPAX elements to search * @ param graph graph model * @ return Querible Interactions ( nodes ) */ public static Set < Node > getSeedInteractions ( Collection < BioPAXElement > elements , Graph graph ) { } }
Set < Node > nodes = new HashSet < Node > ( ) ; for ( BioPAXElement ele : elements ) { if ( ele instanceof Conversion || ele instanceof TemplateReaction || ele instanceof Control ) { GraphObject go = graph . getGraphObject ( ele ) ; if ( go instanceof Node ) { nodes . add ( ( Node ) go ) ; } } } return nodes ;
public class FSNamesystem {

    /**
     * We want "replication" replicates for the block, but we now have too many.
     * In this method, copy enough nodes from 'srcNodes' into 'dstNodes' such that:
     *   srcNodes.size() - dstNodes.size() == replication
     * We pick nodes so that replicas stay spread across racks and also try hard
     * to pick one with least free space.
     *
     * @param nonExcess all datanodes currently holding the block; the nodes
     *                  chosen for removal are removed from this collection in place
     * @param b the over-replicated block
     * @param replication the desired replication factor
     * @param addedNode the node a replica was just added to, if any
     * @param delNodeHint preferred node to delete from, if any
     * @param inode the file the block belongs to
     * @param excessReplicateMapTmp output list collecting the nodes chosen for removal
     */
    void chooseExcessReplicates(Collection<DatanodeDescriptor> nonExcess, Block b, short replication,
            DatanodeDescriptor addedNode, DatanodeDescriptor delNodeHint, INodeFile inode,
            List<DatanodeID> excessReplicateMapTmp) {
        // first form a rack to datanodes map
        HashMap<String, ArrayList<DatanodeDescriptor>> rackMap = new HashMap<String, ArrayList<DatanodeDescriptor>>();
        for (Iterator<DatanodeDescriptor> iter = nonExcess.iterator(); iter.hasNext();) {
            DatanodeDescriptor node = iter.next();
            String rackName = node.getNetworkLocation();
            ArrayList<DatanodeDescriptor> datanodeList = rackMap.get(rackName);
            if (datanodeList == null) {
                datanodeList = new ArrayList<DatanodeDescriptor>();
            }
            datanodeList.add(node);
            rackMap.put(rackName, datanodeList);
        }
        // split nodes into two sets:
        // priSet contains nodes on racks with more than one replica,
        // remains contains the remaining nodes.
        // It may be useful for the corresponding BlockPlacementPolicy.
        ArrayList<DatanodeDescriptor> priSet = new ArrayList<DatanodeDescriptor>();
        ArrayList<DatanodeDescriptor> remains = new ArrayList<DatanodeDescriptor>();
        for (Iterator<Entry<String, ArrayList<DatanodeDescriptor>>> iter = rackMap.entrySet().iterator(); iter.hasNext();) {
            Entry<String, ArrayList<DatanodeDescriptor>> rackEntry = iter.next();
            ArrayList<DatanodeDescriptor> datanodeList = rackEntry.getValue();
            if (datanodeList.size() == 1) {
                remains.add(datanodeList.get(0));
            } else {
                priSet.addAll(datanodeList);
            }
        }
        // pick one node to delete that favors the delete hint;
        // otherwise follow the strategy of the corresponding BlockPlacementPolicy.
        boolean firstOne = true;
        while (nonExcess.size() - replication > 0) {
            DatanodeInfo cur = null;
            // NOTE(review): minSpace is never read after initialization — candidate for removal.
            long minSpace = Long.MAX_VALUE;
            // check if we can delete delNodeHint: only on the first iteration, and
            // only if doing so keeps replicas spread across racks
            if (firstOne && delNodeHint != null && nonExcess.contains(delNodeHint)
                    && (priSet.contains(delNodeHint) || (addedNode != null && !priSet.contains(addedNode)))) {
                cur = delNodeHint;
            } else { // regular excessive replica removal
                cur = replicator.chooseReplicaToDelete(inode, b, replication, priSet, remains);
            }
            firstOne = false;
            // adjust rackMap, priSet, and remains to reflect the removal of cur
            String rack = cur.getNetworkLocation();
            ArrayList<DatanodeDescriptor> datanodes = rackMap.get(rack);
            datanodes.remove(cur);
            if (datanodes.isEmpty()) {
                rackMap.remove(rack);
            }
            if (priSet.remove(cur)) {
                // if only one replica is left on this rack, that node is no
                // longer "priority" — demote it to remains
                if (datanodes.size() == 1) {
                    priSet.remove(datanodes.get(0));
                    remains.add(datanodes.get(0));
                }
            } else {
                remains.remove(cur);
            }
            nonExcess.remove(cur);
            excessReplicateMapTmp.add(cur);
            if (NameNode.stateChangeLog.isDebugEnabled()) {
                NameNode.stateChangeLog.debug("BLOCK* NameSystem.chooseExcessReplicates: "
                        + "(" + cur.getName() + ", " + b + ") is added to excessReplicateMapTmp");
            }
        }
    }
}
public class OfflineEditsViewer {

    /**
     * Process EditLog file.
     *
     * @param visitor use this visitor to process the file
     * @throws IOException if loading the edits fails
     */
    public void go(EditsVisitor visitor) throws IOException {
        // pick the loader implementation matching the visitor, then run it
        setEditsLoader(EditsLoader.LoaderFactory.getLoader(visitor));
        editsLoader.loadEdits();
    }
}
public class SqlBuilder {

    /**
     * Builds the SQL string and variable bindings needed for a prepared
     * statement: converts the named <key, value> parameter pairs into the
     * <index, value> pairs used by a PreparedStatement.
     *
     * @param query SQL text containing named placeholders of the form {@code :key}
     * @param parameters values keyed by placeholder name
     * @return the Sql object carrying the "?"-substituted query, the key/index
     *         map and the index/value map
     */
    private Sql getPreperSQLWithParameters(String query, Map<String, Object> parameters) {
        // Find every ":key" marker in the query.
        Pattern pattern = Pattern.compile(":\\w+");
        Matcher matcher = pattern.matcher(query);
        // Build the <key, index> pairs in placeholder order (1-based, JDBC style).
        int index = 1;
        Map<String, Integer> keyIndexs = new HashMap<String, Integer>();
        while (matcher.find()) {
            String word = matcher.group();
            // always true given the pattern above; kept as a defensive guard
            if (word.startsWith(":")) {
                keyIndexs.put(word.substring(1), index++);
            }
        }
        // Build the <index, value> pairs.
        // NOTE(review): a parameter key with no matching placeholder yields a
        // null index here and is stored under key null — confirm callers
        // always pass keys that appear in the query.
        Map<Integer, Object> prepareValue = new HashMap<Integer, Object>();
        Set<String> paramKeySet = parameters.keySet();
        for (String paramKey : paramKeySet) {
            Integer preIndex = keyIndexs.get(paramKey);
            Object preValue = parameters.get(paramKey);
            prepareValue.put(preIndex, preValue);
        }
        // Replace all named markers with "?" (replaceAll resets the matcher first).
        query = matcher.replaceAll("?");
        Sql sql = new Sql(query, parameters);
        sql.setPreperStatement(true);
        sql.setKeyIndexs(keyIndexs);
        sql.setPreperValues(prepareValue);
        return sql;
    }
}
public class Ifc2x3tc1FactoryImpl {

    /**
     * Converts an IfcChillerTypeEnum instance value to its literal string form
     * (null-safe).
     * <!-- begin-user-doc -->
     * <!-- end-user-doc -->
     * @generated
     */
    public String convertIfcChillerTypeEnumToString(EDataType eDataType, Object instanceValue) {
        return instanceValue == null ? null : instanceValue.toString();
    }
}
public class IpHelper {

    /**
     * Parses an IP address, returning it as an InetAddress.<br/>
     * Exists so code handling known-valid IP addresses does not have to catch
     * UnknownHostException everywhere (when there is often no choice but to
     * totally fail out anyway).
     *
     * @param ip a valid IP address (IPv4 or IPv6)
     * @return the InetAddress for that ip; equivalent to calling
     *         {@code InetAddress.getByName} on the IP, without the checked exception
     * @throws IllegalArgumentException if the IP address is invalid (e.g. null,
     *         an empty string, or otherwise not in a valid IP address format)
     */
    public static InetAddress stoa(final String ip) {
        if (ip == null || ip.isEmpty()) {
            throw new IllegalArgumentException("must pass a valid ip: null or empty strings are not valid IPs!");
        }
        try {
            return InetAddress.getByName(ip);
        } catch (final UnknownHostException cause) {
            throw new IllegalArgumentException("must pass a valid ip. Illegal input was: " + ip, cause);
        }
    }
}
public class RepositoryTypeClass {

    /**
     * <p>Getter for the field <code>envType</code>.</p>
     *
     * @return a {@link com.greenpepper.server.domain.EnvironmentType} object.
     */
    @ManyToOne(cascade = { CascadeType.PERSIST, CascadeType.MERGE })
    @JoinColumn(name = "ENVIRONMENT_TYPE_ID", nullable = false)
    public EnvironmentType getEnvType() {
        return envType;
    }
}
public class CmsSetupBean { /** * Generates the HTML code for the drop down for db selection . < p > * @ return the generated HTML */ public String getHtmlForDbSelection ( ) { } }
StringBuffer buf = new StringBuffer ( 2048 ) ; buf . append ( "<select name=\"fullDatabaseKey\" style=\"width: 250px;\" size=\"1\" onchange=\"location.href='../../step_3_database_selection.jsp?fullDatabaseKey='+this.options[this.selectedIndex].value;\">" ) ; buf . append ( "<!-- --------------------- JSP CODE --------------------------- -->" ) ; // get all available databases List < String > databases = getSortedDatabases ( ) ; // List all databases found in the dbsetup . properties if ( ( databases != null ) && ( databases . size ( ) > 0 ) ) { List < String > sqlDbs = new ArrayList < String > ( ) ; for ( String dbKey : databases ) { sqlDbs . add ( dbKey ) ; } // show the sql dbs first for ( String dbKey : sqlDbs ) { String dn = getDatabaseName ( dbKey ) ; String selected = "" ; if ( getFullDatabaseKey ( ) . equals ( dbKey + "_sql" ) ) { selected = "selected" ; } buf . append ( "<option value='" + dbKey + "_sql' " + selected + ">" + dn ) ; } } else { buf . append ( "<option value='null'>no database found" ) ; } buf . append ( "<!-- --------------------------------------------------------- -->" ) ; buf . append ( "</select>" ) ; return buf . toString ( ) ;
public class Ifc2x3tc1PackageImpl {

    /**
     * Lazily resolves and caches the EClass for IfcRationalBezierCurve from
     * the registered Ifc2x3tc1 package (classifier index 425).
     * <!-- begin-user-doc -->
     * <!-- end-user-doc -->
     * @generated
     */
    public EClass getIfcRationalBezierCurve() {
        if (ifcRationalBezierCurveEClass == null) {
            ifcRationalBezierCurveEClass = (EClass) EPackage.Registry.INSTANCE
                    .getEPackage(Ifc2x3tc1Package.eNS_URI).getEClassifiers().get(425);
        }
        return ifcRationalBezierCurveEClass;
    }
}
public class Log { /** * Debugging . */ public static final void d ( Object ... messages ) { } }
if ( Config . DEBUG_V ) { print ( DEBUG_TEXT + ": " ) ; for ( int i = 0 ; i < messages . length ; i ++ ) { print ( messages [ i ] ) ; if ( i != messages . length - 1 ) { print ( " " ) ; } } println ( "." ) ; }
public class TypeDiscoverer {

    /**
     * Resolves the given type into a {@link Type}, substituting type variables
     * using this discoverer's type-variable map.
     *
     * @param type the type to resolve
     * @return the resolved type
     */
    @SuppressWarnings({ "unchecked", "rawtypes" })
    protected Type resolveType(Type type) {
        // copy the map so the resolver cannot observe later mutations
        Map<TypeVariable, Type> map = new HashMap<>();
        map.putAll(getTypeVariableMap());
        return ResolvableType.forType(type, new TypeVariableMapVariableResolver(map)).getType();
    }
}
public class SimpleMessageFormatter {

    /**
     * Formats the user-supplied {@link Formattable} into {@code out},
     * accounting for any possible runtime exceptions thrown by the value's own
     * formatting code: on failure the partial output is rolled back and a
     * best-effort error string is appended instead.
     *
     * @param value the value to be formatted
     * @param out the buffer the formatted (or error) text is appended to
     * @param options the parsed format options (flags, width, precision)
     */
    private static void safeFormatTo(Formattable value, StringBuilder out, FormatOptions options) {
        // Only care about 3 specific flags for Formattable.
        int formatFlags = options.getFlags() & (FLAG_LEFT_ALIGN | FLAG_UPPER_CASE | FLAG_SHOW_ALT_FORM);
        if (formatFlags != 0) {
            // TODO: Maybe re-order the options flags to make this step easier or use a lookup table.
            // Note that reordering flags would require a rethink of how they are parsed.
            formatFlags = ((formatFlags & FLAG_LEFT_ALIGN) != 0 ? FormattableFlags.LEFT_JUSTIFY : 0)
                    | ((formatFlags & FLAG_UPPER_CASE) != 0 ? FormattableFlags.UPPERCASE : 0)
                    | ((formatFlags & FLAG_SHOW_ALT_FORM) != 0 ? FormattableFlags.ALTERNATE : 0);
        }
        // We may need to undo an arbitrary amount of appending if there is an error.
        int originalLength = out.length();
        Formatter formatter = new Formatter(out, FORMAT_LOCALE);
        try {
            value.formatTo(formatter, formatFlags, options.getWidth(), options.getPrecision());
        } catch (RuntimeException e) {
            out.setLength(originalLength);
            // We only use a StringBuilder to create the Formatter instance.
            try {
                formatter.out().append(getErrorString(value, e));
            } catch (IOException impossible) {
                // appending to a StringBuilder-backed Appendable never throws
            }
        }
    }
}
public class MapDataStores { /** * Used for providing neutral null behaviour . * @ param < K > type of key to store * @ param < V > type of value to store * @ return empty store manager */ public static < K , V > MapDataStore < K , V > emptyStore ( ) { } }
return ( MapDataStore < K , V > ) EMPTY_MAP_DATA_STORE ;
public class IntegerColumnsMathOpTransform { /** * Transform a sequence * @ param sequence */ @ Override public Object mapSequence ( Object sequence ) { } }
List < List < Integer > > seq = ( List < List < Integer > > ) sequence ; List < Integer > ret = new ArrayList < > ( ) ; for ( List < Integer > step : seq ) ret . add ( ( Integer ) map ( step ) ) ; return ret ;
public class VarOptItemsUnion { /** * this is a condition checked in detectAndHandleSubcaseOfPseudoExact ( ) */ private boolean thereExistUnmarkedHItemsLighterThanTarget ( final double threshold ) { } }
for ( int i = 0 ; i < gadget_ . getHRegionCount ( ) ; ++ i ) { if ( ( gadget_ . getWeight ( i ) < threshold ) && ! gadget_ . getMark ( i ) ) { return true ; } } return false ;
public class CompilerExtensions {

    /**
     * Factory method that creates an {@link URI} from the given uri string.
     *
     * @param uriString the uri string
     * @return the created {@link URI}
     * @throws URISyntaxException the URI syntax exception
     */
    public static URI newURI(final String uriString) throws URISyntaxException {
        return new URI(uriString);
    }
}
public class CommandContext {

    /**
     * Stores the provided exception on this {@link CommandContext} instance.
     * That exception will be rethrown at the end of closing the {@link CommandContext} instance.
     * If there is already an exception being stored, a 'masked exception'
     * message is logged and the new exception is dropped — the first one wins.
     */
    public void exception(Throwable exception) {
        if (this.exception == null) {
            this.exception = exception;
        } else {
            log.error("masked exception in command context. for root cause, see below as it will be rethrown later.", exception);
            // clear the diagnostic logging context after reporting the masked exception
            LogMDC.clear();
        }
    }
}
public class LifecyclePolicyRuleActionMarshaller {

    /**
     * Marshall the given parameter object.
     *
     * @param lifecyclePolicyRuleAction the object to marshall; must not be null
     * @param protocolMarshaller the marshaller the fields are written to
     * @throws SdkClientException if the argument is null or marshalling fails
     */
    public void marshall(LifecyclePolicyRuleAction lifecyclePolicyRuleAction, ProtocolMarshaller protocolMarshaller) {
        if (lifecyclePolicyRuleAction == null) {
            throw new SdkClientException("Invalid argument passed to marshall(...)");
        }
        try {
            protocolMarshaller.marshall(lifecyclePolicyRuleAction.getType(), TYPE_BINDING);
        } catch (Exception e) {
            // wrap all failures in a single client-exception type, keeping the cause
            throw new SdkClientException("Unable to marshall request to JSON: " + e.getMessage(), e);
        }
    }
}
public class MessageInteractionReader {

    /**
     * Make the request to the Twilio API to perform the read.
     *
     * @param client TwilioRestClient with which to make the request
     * @return MessageInteraction ResourceSet
     */
    @Override
    @SuppressWarnings("checkstyle:linelength")
    public Page<MessageInteraction> firstPage(final TwilioRestClient client) {
        // build the GET against the Proxy domain, scoped to this reader's
        // service / session / participant path components
        Request request = new Request(
                HttpMethod.GET,
                Domains.PROXY.toString(),
                "/v1/Services/" + this.pathServiceSid + "/Sessions/" + this.pathSessionSid + "/Participants/" + this.pathParticipantSid + "/MessageInteractions",
                client.getRegion());
        addQueryParams(request);
        return pageForRequest(client, request);
    }
}
public class ClassUtils {

    /**
     * <p>invoke.</p> Invokes the given constructor with the supplied arguments.
     *
     * <p>Exceptions thrown by the constructor body arrive wrapped in
     * {@link java.lang.reflect.InvocationTargetException}; this method unwraps
     * them so callers see the original exception, matching the existing
     * unwrap of {@link InstantiationException}.
     *
     * @param constructor a {@link java.lang.reflect.Constructor} object.
     * @param args a {@link java.lang.Object} object.
     * @param <T> a T object.
     * @return a T object.
     * @throws java.lang.Throwable if any.
     */
    public static <T> T invoke(Constructor<T> constructor, Object... args) throws Throwable {
        try {
            return constructor.newInstance(args);
        } catch (InvocationTargetException e) {
            // the constructor body itself threw — surface the real exception
            throw e.getCause();
        } catch (InstantiationException e) {
            throw e.getCause();
        }
    }
}
public class IfcTableImpl {

    /**
     * Returns the table's columns via the EMF reflective getter.
     * <!-- begin-user-doc -->
     * <!-- end-user-doc -->
     * @generated
     */
    @SuppressWarnings("unchecked")
    @Override
    public EList<IfcTableColumn> getColumns() {
        return (EList<IfcTableColumn>) eGet(Ifc4Package.Literals.IFC_TABLE__COLUMNS, true);
    }
}
public class Controller {

    /**
     * Runs the task on threads supplied by the injected {@link ExecutorService}
     * without a callback. By default tasks run on separate threads; a simple
     * same-thread {@link ExecutorService} can be injected in test cases to make
     * testing easier.
     *
     * <p><b>Use the protected property {@code uiThreadRunner} to post actions
     * back to the main UI thread inside {@code Task#execute(Task.Monitor)}:</b>
     * <pre>
     * uiThreadRunner.post(new Runnable() {
     *     &#64;Override
     *     public void run() {
     *         view.update();
     *     }
     * });
     * </pre>
     *
     * @param task the task
     * @return the monitor to track the state of the execution of the task;
     *         it can also cancel the task
     */
    protected <RESULT> Task.Monitor<RESULT> runTask(final Task<RESULT> task) {
        // delegate to the three-argument overload with no completion callback
        return runTask(executorService, task, null);
    }
}
public class JsonRpcClient {

    /**
     * Invokes the given method on the remote service passing the given
     * argument, a generated id, and reads a response.
     *
     * @param methodName the method to invoke
     * @param argument the argument to pass to the method
     * @param clazz the expected return type
     * @param output the {@link OutputStream} to write to
     * @param input the {@link InputStream} to read from
     * @param <T> the expected return type
     * @return the returned Object
     * @throws Throwable on error
     * @see #writeRequest(String, Object, OutputStream, String)
     */
    @SuppressWarnings("unchecked")
    public <T> T invokeAndReadResponse(String methodName, Object argument, Class<T> clazz, OutputStream output, InputStream input) throws Throwable {
        // Class implements Type; the cast simply widens clazz so the
        // Type-based overload of invokeAndReadResponse is selected.
        return (T) invokeAndReadResponse(methodName, argument, Type.class.cast(clazz), output, input);
    }
}
public class MessageBatcher { /** * Stops the batcher . The Batcher has to wait for the other processes like * the Collector and the Executor to complete . It waits until it is notified * that the other processes have completed gracefully . The collector waits * until there are no more messages in the queue ( tries 3 times waiting for * 0.5 seconds each ) and then shuts down gracefully . */ public void stop ( ) { } }
/* * Sets the shutdown flag . Future sends to the batcher are not accepted . * The processors wait for the current messages in the queue and with * the processor or collector to complete */ isShutDown = true ; int waitTimeinMillis = CONFIGURATION . getBatcherWaitTimeBeforeShutdown ( this . name ) ; long timeToWait = waitTimeinMillis + System . currentTimeMillis ( ) ; while ( ( queue . size ( ) > 0 || batch . size ( ) > 0 ) && ( System . currentTimeMillis ( ) < timeToWait ) ) { try { Thread . sleep ( 1000 ) ; } catch ( InterruptedException e ) { break ; } } try { shouldCollectorShutdown = true ; processor . shutdownNow ( ) ; /* * processor . awaitTermination ( 10000 , TimeUnit . SECONDS ) ; if * ( ! processor . isShutdown ( ) ) { processor . shutdownNow ( ) ; } */ } catch ( Throwable e ) { // TODO Auto - generated catch block e . printStackTrace ( ) ; }
public class Signature {

    /**
     * Similar to {@code T extends MyClass<?...>}, if Java supported varargs
     * wildcards.
     *
     * @param name the type variable name
     * @param variadicBound the bound applied to the variadic wildcard
     * @return the constructed constraint
     */
    public static TypeVariableConstraint withVariadicBound(String name, String variadicBound) {
        // NOTE(review): the two boolean flags are passed as false here —
        // presumably they toggle comparable/orderable requirements; confirm
        // against TypeVariableConstraint's constructor before relying on this.
        return new TypeVariableConstraint(name, false, false, variadicBound);
    }
}
public class ColumnVectorUtils {

    /**
     * Populates the entire `col` with `row[fieldIdx]`: the single value is
     * repeated across all `col.capacity` slots, dispatching on the column's
     * data type. A null input value nulls out the whole column.
     */
    public static void populate(WritableColumnVector col, InternalRow row, int fieldIdx) {
        int capacity = col.capacity;
        DataType t = col.dataType();
        if (row.isNullAt(fieldIdx)) {
            col.putNulls(0, capacity);
        } else {
            if (t == DataTypes.BooleanType) {
                col.putBooleans(0, capacity, row.getBoolean(fieldIdx));
            } else if (t == DataTypes.ByteType) {
                col.putBytes(0, capacity, row.getByte(fieldIdx));
            } else if (t == DataTypes.ShortType) {
                col.putShorts(0, capacity, row.getShort(fieldIdx));
            } else if (t == DataTypes.IntegerType) {
                col.putInts(0, capacity, row.getInt(fieldIdx));
            } else if (t == DataTypes.LongType) {
                col.putLongs(0, capacity, row.getLong(fieldIdx));
            } else if (t == DataTypes.FloatType) {
                col.putFloats(0, capacity, row.getFloat(fieldIdx));
            } else if (t == DataTypes.DoubleType) {
                col.putDoubles(0, capacity, row.getDouble(fieldIdx));
            } else if (t == DataTypes.StringType) {
                // strings have no bulk-put for byte arrays; copy the payload slot by slot
                UTF8String v = row.getUTF8String(fieldIdx);
                byte[] bytes = v.getBytes();
                for (int i = 0; i < capacity; i++) {
                    col.putByteArray(i, bytes);
                }
            } else if (t instanceof DecimalType) {
                // decimals are stored as int, long, or unscaled bytes depending on precision
                DecimalType dt = (DecimalType) t;
                Decimal d = row.getDecimal(fieldIdx, dt.precision(), dt.scale());
                if (dt.precision() <= Decimal.MAX_INT_DIGITS()) {
                    col.putInts(0, capacity, (int) d.toUnscaledLong());
                } else if (dt.precision() <= Decimal.MAX_LONG_DIGITS()) {
                    col.putLongs(0, capacity, d.toUnscaledLong());
                } else {
                    final BigInteger integer = d.toJavaBigDecimal().unscaledValue();
                    byte[] bytes = integer.toByteArray();
                    for (int i = 0; i < capacity; i++) {
                        col.putByteArray(i, bytes, 0, bytes.length);
                    }
                }
            } else if (t instanceof CalendarIntervalType) {
                // intervals fan out into two child vectors: months (int) and microseconds (long)
                CalendarInterval c = (CalendarInterval) row.get(fieldIdx, t);
                col.getChild(0).putInts(0, capacity, c.months);
                col.getChild(1).putLongs(0, capacity, c.microseconds);
            } else if (t instanceof DateType) {
                col.putInts(0, capacity, row.getInt(fieldIdx));
            } else if (t instanceof TimestampType) {
                col.putLongs(0, capacity, row.getLong(fieldIdx));
            }
            // NOTE(review): any type not covered above falls through silently —
            // confirm that callers only pass the handled types.
        }
    }
}
public class Agg {

    /**
     * Get a {@link Collector} that calculates the <code>MIN()</code> function,
     * producing multiple results (all elements tying for the minimum).
     */
    public static <T extends Comparable<? super T>> Collector<T, ?, Seq<T>> minAll() {
        // identity key extractor combined with natural ordering
        return minAllBy(t -> t, naturalOrder());
    }
}
public class SwingUtil {

    /**
     * Create a comparator that compares against the distance from the specified point.
     * Note: The comparator will continue to sort by distance from the origin point, even if the
     * origin point's coordinates are modified after the comparator is created.
     * Used by positionRect().
     *
     * @param origin the reference point distances are measured from
     * @return a comparator ordering points nearest-to-origin first
     */
    public static <P extends Point2D> Comparator<P> createPointComparator(final P origin) {
        return new Comparator<P>() {
            public int compare(P p1, P p2) {
                // Double.compare replaces the original hand-rolled ternary
                // chain; identical ordering for all non-NaN distances and
                // immune to the subtraction-overflow class of comparator bugs.
                return Double.compare(origin.distance(p1), origin.distance(p2));
            }
        };
    }
}
public class CmsSecurityManager {

    /**
     * Internal recursive method for deleting a resource.<p>
     *
     * @param dbc the db context
     * @param resource the resource to delete
     * @param siblingMode indicates how to handle siblings of the deleted resource
     * @throws CmsException if something goes wrong
     */
    protected void deleteResource(CmsDbContext dbc, CmsResource resource, CmsResource.CmsResourceDeleteMode siblingMode) throws CmsException {
        if (resource.isFolder()) {
            // collect all resources in the folder (but exclude deleted ones)
            List<CmsResource> resources = m_driverManager.readChildResources(dbc, resource, CmsResourceFilter.IGNORE_EXPIRATION, true, true, false);
            // tracks resource ids already deleted in this folder, so siblings
            // of an already-deleted resource can be skipped below
            Set<CmsUUID> deletedResources = new HashSet<CmsUUID>();
            // now walk through all sub-resources in the folder
            for (int i = 0; i < resources.size(); i++) {
                CmsResource childResource = resources.get(i);
                if ((siblingMode == CmsResource.DELETE_REMOVE_SIBLINGS) && deletedResources.contains(childResource.getResourceId())) {
                    // sibling mode is "delete all siblings" and another sibling of the current child resource has already
                    // been deleted - do nothing and continue with the next child resource.
                    continue;
                }
                if (childResource.isFolder()) {
                    // recurse into this method for subfolders
                    deleteResource(dbc, childResource, siblingMode);
                } else {
                    // handle child resources
                    m_driverManager.deleteResource(dbc, childResource, siblingMode);
                }
                deletedResources.add(childResource.getResourceId());
            }
            deletedResources.clear();
        }
        // handle the resource itself
        m_driverManager.deleteResource(dbc, resource, siblingMode);
    }
}
public class XATransactionWrapper {

    /**
     * Ends the work performed on behalf of a transaction branch. The resource
     * manager disassociates the XA resource from the transaction branch
     * specified and lets the transaction be completed.
     * <p>If TMSUSPEND is specified in flags, the transaction branch is
     * temporarily suspended in incomplete state and must be resumed via start
     * with TMRESUME specified.
     * <p>If TMFAIL is specified, the portion of work has failed. The resource
     * manager may mark the transaction as rollback-only.
     * <p>If TMSUCCESS is specified, the portion of work has completed successfully.
     *
     * @param xid a global transaction identifier, the same as was used
     *            previously in the start method
     * @param flags one of TMSUCCESS, TMFAIL, or TMSUSPEND
     * @throws XAException on error; possible values are XAER_RMERR,
     *         XAER_RMFAILED, XAER_NOTA, XAER_INVAL, XAER_PROTO, or XA_RB*
     */
    @Override
    public void end(Xid xid, int flags) throws XAException {
        final boolean isTracingEnabled = TraceComponent.isAnyTracingEnabled();
        if (isTracingEnabled && tc.isEntryEnabled())
            Tr.entry(this, tc, "end");
        // an aborted managed connection has nothing to end — bail out early
        if (getMcWrapper().isMCAborted()) {
            Tr.exit(tc, "Connection was aborted. Exiting end.");
            return;
        }
        try {
            xaResource.end(xid, flags);
        } catch (XAException e) {
            processXAException(e);
            // on TMFAIL, rollback-range error codes are expected and swallowed
            if (flags == XAResource.TMFAIL && ((e.errorCode >= XAException.XA_RBBASE) && (e.errorCode <= XAException.XA_RBEND))) {
                // okay error codes for the rollback processing.
            } else {
                com.ibm.ws.ffdc.FFDCFilter.processException(e, "com.ibm.ejs.j2c.XATransactionWrapper.end", "417", this);
                if (!mcWrapper.isStale()) {
                    Tr.error(tc, "XA_RESOURCE_ADAPTER_OPERATION_ID_EXCP_J2CA0027", "end", xid, e, mcWrapper.gConfigProps.cfName);
                }
                if (isTracingEnabled && tc.isEntryEnabled())
                    Tr.exit(this, tc, "end", e);
                throw e;
            }
        } catch (Exception e) {
            // non-XA failure: record it, mark the connection bad, and surface it
            // to the transaction manager as XAER_RMFAIL with the cause attached
            com.ibm.ws.ffdc.FFDCFilter.processException(e, "com.ibm.ejs.j2c.XATransactionWrapper.end", "423", this);
            if (!mcWrapper.shouldBeDestroyed()) {
                mcWrapper.markTransactionError();
                Tr.error(tc, "XA_RESOURCE_ADAPTER_OPERATION_ID_EXCP_J2CA0027", "end", xid, e, mcWrapper.gConfigProps.cfName);
            }
            XAException xae = new XAException(XAException.XAER_RMFAIL);
            xae.initCause(e);
            if (isTracingEnabled && tc.isEntryEnabled())
                Tr.exit(this, tc, "end", xae);
            throw xae;
        }
        if (isTracingEnabled && tc.isEntryEnabled())
            Tr.exit(this, tc, "end");
    }
}
public class JsonLdUtils {

    /**
     * Returns true if the given value is a subject reference.
     *
     * <p>A value is a subject reference if both of these hold:
     * 1. it is a Map, and 2. it has exactly one key: {@code @id}.
     *
     * @param v the value to check.
     * @return true if the value is a subject reference, false if not.
     */
    static boolean isNodeReference(Object v) {
        // wildcard map view replaces the original's two unchecked casts;
        // size()/containsKey() need no element-type assumptions
        if (!(v instanceof Map)) {
            return false;
        }
        final Map<?, ?> map = (Map<?, ?>) v;
        return map.size() == 1 && map.containsKey("@id");
    }
}
public class FlowNode {

    /**
     * Get the value of the flow in one of the surrounding directions.
     *
     * @param direction the {@link Direction}
     * @return the flow value stored for that direction
     * @throws IllegalArgumentException if the direction is not one of the
     *         eight handled values
     */
    public int getFlowAt(Direction direction) {
        switch (direction) {
            case E: return eFlow;
            case W: return wFlow;
            case N: return nFlow;
            case S: return sFlow;
            case EN: return enFlow;
            case NW: return nwFlow;
            case WS: return wsFlow;
            case SE: return seFlow;
            default: throw new IllegalArgumentException();
        }
    }
}
public class AccumulatorUtil {

    /**
     * Create and register a new {@link Accumulator} for class/method level
     * stats accumulation. In case producer or annotation is null, does nothing.
     *
     * @param producer stats producer
     * @param annotation {@link Accumulate} or {@link AccumulateWithSubClasses}
     *        annotation used to create the {@link Accumulator}
     * @param method the annotated {@link Method}, or null for a class-level accumulator
     */
    public void createAccumulator(final OnDemandStatsProducer producer, final A annotation, final Method method) {
        if (producer != null && annotation != null) {
            // class-level accumulators (method == null) use the producer's cumulated-stats bucket
            final String statsName = (method == null) ? OnDemandStatsProducer.CUMULATED_STATS_NAME : method.getName();
            String accumulatorName = getName(annotation);
            // fall back to a derived name when the annotation supplies none
            if (StringUtils.isEmpty(accumulatorName))
                accumulatorName = method == null ? formAccumulatorNameForClass(producer, annotation) : formAccumulatorNameForMethod(producer, annotation, method);
            createAccumulator(producer.getProducerId(), annotation, accumulatorName, statsName);
        }
    }
}
public class AWSPricingClient { /** * Returns a list of all products that match the filter criteria . * @ param getProductsRequest * @ return Result of the GetProducts operation returned by the service . * @ throws InternalErrorException * An error on the server occurred during the processing of your request . Try again later . * @ throws InvalidParameterException * One or more parameters had an invalid value . * @ throws NotFoundException * The requested resource can ' t be found . * @ throws InvalidNextTokenException * The pagination token is invalid . Try again without a pagination token . * @ throws ExpiredNextTokenException * The pagination token expired . Try again without a pagination token . * @ sample AWSPricing . GetProducts * @ see < a href = " http : / / docs . aws . amazon . com / goto / WebAPI / pricing - 2017-10-15 / GetProducts " target = " _ top " > AWS API * Documentation < / a > */ @ Override public GetProductsResult getProducts ( GetProductsRequest request ) { } }
request = beforeClientExecution ( request ) ; return executeGetProducts ( request ) ;
public class RICacheStatistics { /** * TODO : was package - level initially */ public void addGetTimeNano ( long duration ) { } }
if ( unsupportCacheGetTotalTime . get ( ) <= Long . MAX_VALUE - duration ) { unsupportCacheGetTotalTime . addAndGet ( duration ) ; } else { // counter full . Just reset . clear ( ) ; unsupportCacheGetTotalTime . set ( duration ) ; }
public class ElementMatchers {
    /**
     * Matches a parameter in its defined shape.
     *
     * @param matcher The matcher to apply to the matched parameter's defined shape.
     * @param <T> The matched object's type.
     * @return A matcher that matches a matched parameter's defined shape.
     */
    public static <T extends ParameterDescription> ElementMatcher.Junction<T> definedParameter(ElementMatcher<? super ParameterDescription.InDefinedShape> matcher) {
        // Wrap the given matcher so it is applied to the parameter's defined shape.
        return new DefinedShapeMatcher<T, ParameterDescription.InDefinedShape>(matcher);
    }
}
public class FSDirectoryManager {
    /**
     * {@inheritDoc}
     *
     * Resolves the given path into the base directory, performing the file
     * access inside a privileged block so it works under a security manager.
     */
    public void init(final String path) throws IOException {
        SecurityHelper.doPrivilegedIOExceptionAction(new PrivilegedExceptionAction<Object>() {
            public Object run() throws Exception {
                // Only records the directory; no I/O validation is performed here.
                baseDir = new File(path);
                return null;
            }
        });
    }
}
public class RetryableResource { /** * Handles common method invocations . */ boolean handleCommonMethods ( Object delegate , Method method , Object [ ] args ) throws Throwable { } }
if ( "abort" . equals ( method . getName ( ) ) || "close" . equals ( method . getName ( ) ) ) { try { Reflection . invoke ( delegate , method , args ) ; return true ; } finally { closed = true ; afterClosure ( ) ; interruptWaiters ( ) ; } } else if ( "addShutdownListener" . equals ( method . getName ( ) ) && args [ 0 ] != null ) shutdownListeners . add ( ( ShutdownListener ) args [ 0 ] ) ; else if ( "removeShutdownListener" . equals ( method . getName ( ) ) && args [ 0 ] != null ) shutdownListeners . remove ( ( ShutdownListener ) args [ 0 ] ) ; return false ;
public class TargetTable {
    /**
     * Recomputes the total target count and, when the filtered result exceeds
     * the table limit, records how many targets were truncated in the UI state.
     */
    private void resetTargetCountDetails() {
        final long totalTargetsCount = getTotalTargetsCount();
        managementUIState.setTargetsCountAll(totalTargetsCount);
        // Collect the currently active filter criteria from the shared UI state.
        final boolean noTagClicked = managementUIState.getTargetTableFilters().isNoTagSelected();
        final Long distributionId = managementUIState.getTargetTableFilters().getDistributionSet()
                .map(DistributionSetIdName::getId).orElse(null);
        final Long pinnedDistId = managementUIState.getTargetTableFilters().getPinnedDistId().orElse(null);
        // Wrap the search text in SQL-style wildcards; empty text means "no search filter".
        final String searchText = managementUIState.getTargetTableFilters().getSearchText().map(text -> {
            if (StringUtils.isEmpty(text)) {
                return null;
            }
            return String.format("%%%s%%", text);
        }).orElse(null);
        String[] targetTags = null;
        if (isFilteredByTags()) {
            targetTags = managementUIState.getTargetTableFilters().getClickedTargetTags().toArray(new String[0]);
        }
        Collection<TargetUpdateStatus> status = null;
        if (isFilteredByStatus()) {
            status = managementUIState.getTargetTableFilters().getClickedStatusTargetTags();
        }
        // Overdue filter: only set (to true) when the filter is enabled; otherwise left null.
        Boolean overdueState = null;
        if (managementUIState.getTargetTableFilters().isOverdueFilterEnabled()) {
            overdueState = managementUIState.getTargetTableFilters().isOverdueFilterEnabled();
        }
        final long size = getTargetsCountWithFilter(totalTargetsCount, pinnedDistId,
                new FilterParams(status, overdueState, searchText, distributionId, noTagClicked, targetTags));
        // Remember how many rows were cut off when the result exceeds the table limit.
        if (size > SPUIDefinitions.MAX_TABLE_ENTRIES) {
            managementUIState.setTargetsTruncated(size - SPUIDefinitions.MAX_TABLE_ENTRIES);
        }
    }
}
public class DefaultGroovyMethods {
    /**
     * Counts the number of occurrences of the given value inside this array.
     * Comparison is done using Groovy's == operator (using
     * <code>compareTo(value) == 0</code> or <code>equals(value)</code>).
     *
     * @param self the array within which we count the number of occurrences
     * @param value the value being searched for
     * @return the number of occurrences
     * @since 1.6.4
     */
    public static Number count(char[] self, Object value) {
        // Delegate to the iterator-based overload via Groovy's array-iterator adapter.
        return count(InvokerHelper.asIterator(self), value);
    }
}
public class FlexiBean { /** * Gets the value of the property as a { @ code int } using a default value . * @ param propertyName the property name , not empty * @ param defaultValue the default value for null or invalid property * @ return the value of the property * @ throws ClassCastException if the value is not compatible */ public int getInt ( String propertyName , int defaultValue ) { } }
Object obj = get ( propertyName ) ; return obj != null ? ( ( Number ) get ( propertyName ) ) . intValue ( ) : defaultValue ;
public class AbstractDataGridHtmlTag {
    /**
     * Create an un-indexed tag identifier for the given state object.
     *
     * @param state the {@link AbstractHtmlState} upon which the tag identifier will be set once created
     * @param tagId the base tag identifier
     * @throws JspException if tag id generation fails
     */
    protected final void applyTagId(AbstractHtmlState state, String tagId) throws JspException {
        // Derive the final id from the base tag id and store it on the state.
        state.id = generateTagId(tagId);
    }
}
public class LongTupleFunctions { /** * Applies the given binary operator to each pair of elements from the * given tuples , and stores the result in the given result tuple . * If the given result tuple is < code > null < / code > , then a new tuple * will be created and returned . * @ param t0 The first tuple * @ param t1 The second tuple * @ param op The operator to apply * @ param result The tuple that will store the result * @ return The result * @ throws IllegalArgumentException If the given tuples do not have * the same { @ link Tuple # getSize ( ) size } */ public static MutableLongTuple apply ( LongTuple t0 , LongTuple t1 , LongBinaryOperator op , MutableLongTuple result ) { } }
Utils . checkForEqualSize ( t0 , t1 ) ; result = LongTuples . validate ( t0 , result ) ; int n = t0 . getSize ( ) ; for ( int i = 0 ; i < n ; i ++ ) { long operand0 = t0 . get ( i ) ; long operand1 = t1 . get ( i ) ; long r = op . applyAsLong ( operand0 , operand1 ) ; result . set ( i , r ) ; } return result ;
public class RegionInstanceGroupManagerClient { /** * Retrieves the list of managed instance groups that are contained within the specified region . * < p > Sample code : * < pre > < code > * try ( RegionInstanceGroupManagerClient regionInstanceGroupManagerClient = RegionInstanceGroupManagerClient . create ( ) ) { * ProjectRegionName region = ProjectRegionName . of ( " [ PROJECT ] " , " [ REGION ] " ) ; * for ( InstanceGroupManager element : regionInstanceGroupManagerClient . listRegionInstanceGroupManagers ( region ) . iterateAll ( ) ) { * / / doThingsWith ( element ) ; * < / code > < / pre > * @ param region Name of the region scoping this request . * @ throws com . google . api . gax . rpc . ApiException if the remote call fails */ @ BetaApi public final ListRegionInstanceGroupManagersPagedResponse listRegionInstanceGroupManagers ( ProjectRegionName region ) { } }
ListRegionInstanceGroupManagersHttpRequest request = ListRegionInstanceGroupManagersHttpRequest . newBuilder ( ) . setRegion ( region == null ? null : region . toString ( ) ) . build ( ) ; return listRegionInstanceGroupManagers ( request ) ;
public class IfcStructuralConnectionImpl {
    /**
     * Returns the list of structural-member relations for this connection via
     * the EMF reflective getter (resolving proxies).
     * <!-- begin-user-doc -->
     * <!-- end-user-doc -->
     * @generated
     */
    @SuppressWarnings("unchecked")
    @Override
    public EList<IfcRelConnectsStructuralMember> getConnectsStructuralMembers() {
        // Generated EMF accessor: do not hand-edit; regenerate from the model instead.
        return (EList<IfcRelConnectsStructuralMember>) eGet(Ifc4Package.Literals.IFC_STRUCTURAL_CONNECTION__CONNECTS_STRUCTURAL_MEMBERS, true);
    }
}
public class SubCommandMetaCheckVersion {
    /**
     * Verifies metadata versions for all the cluster nodes.
     *
     * <p>Groups nodes by their metadata {@link Properties}. When all nodes
     * share one version set it is printed; otherwise a per-property
     * discrepancy report is produced showing which nodes hold which values
     * and which nodes are missing a property entirely.
     *
     * @param adminClient An instance of AdminClient pointing to the given cluster
     */
    public static void doMetaCheckVersion(AdminClient adminClient) {
        // Map each distinct Properties snapshot to the nodes that reported it.
        Map<Properties, List<Integer>> versionsNodeMap = new HashMap<Properties, List<Integer>>();
        for (Integer nodeId : adminClient.getAdminClientCluster().getNodeIds()) {
            Versioned<Properties> versionedProp = doMetaGetVersionsForNode_ExitOnError(adminClient, nodeId);
            Properties props = versionedProp.getValue();
            if (versionsNodeMap.containsKey(props) == false) {
                versionsNodeMap.put(props, new ArrayList<Integer>());
            }
            versionsNodeMap.get(props).add(nodeId);
        }
        if (versionsNodeMap.keySet().size() <= 0) {
            System.err.println("No Versions found in the system store... something seriously wrong");
        } else if (versionsNodeMap.keySet().size() == 1) {
            // Happy path: every node reported the same metadata versions.
            System.err.println("All the nodes have the same metadata versions.");
            printProperties(versionsNodeMap.keySet().iterator().next());
        } else {
            System.err.println("Mismatching versions detected !!! . All are supposed to be written by the same client " + "" + " and hence they should exactly match but something different, let us analyze deeper ");
            // propertyName -> (propertyValue -> nodes holding that value)
            Map<String, Map<String, List<Integer>>> propertyValueMap = new HashMap<String, Map<String, List<Integer>>>();
            for (Entry<Properties, List<Integer>> entry : versionsNodeMap.entrySet()) {
                System.out.println("**************************** Node(s): " + Arrays.toString(entry.getValue().toArray()) + " ****************************");
                Properties props = entry.getKey();
                printProperties(props);
                for (String propName : props.stringPropertyNames()) {
                    String propValue = props.getProperty(propName);
                    if (propertyValueMap.containsKey(propName) == false) {
                        propertyValueMap.put(propName, new HashMap<String, List<Integer>>());
                    }
                    Map<String, List<Integer>> valuetoNodeMap = propertyValueMap.get(propName);
                    if (valuetoNodeMap.containsKey(propValue) == false) {
                        valuetoNodeMap.put(propValue, new ArrayList<Integer>());
                    }
                    valuetoNodeMap.get(propValue).addAll(entry.getValue());
                }
            }
            System.out.println("########## Properties discrepancy report ########");
            for (Entry<String, Map<String, List<Integer>>> entry : propertyValueMap.entrySet()) {
                Map<String, List<Integer>> valueToNodeMap = entry.getValue();
                String propName = entry.getKey();
                List<Integer> allNodeIds = new ArrayList<Integer>();
                allNodeIds.addAll(adminClient.getAdminClientCluster().getNodeIds());
                List<Integer> nodesWithValues = new ArrayList<Integer>();
                if (valueToNodeMap.size() != 1) {
                    // Multiple distinct values for this property: list every value with its nodes.
                    System.out.println("Properties with multiple values");
                    for (Entry<String, List<Integer>> valueToNodeEntry : valueToNodeMap.entrySet()) {
                        String propValue = valueToNodeEntry.getKey();
                        nodesWithValues.addAll(valueToNodeEntry.getValue());
                        printProperty(propName, propValue, valueToNodeEntry.getValue());
                    }
                } else {
                    // Single value: only report it when some nodes are missing the property.
                    Map.Entry<String, List<Integer>> valueToNodeEntry = valueToNodeMap.entrySet().iterator().next();
                    nodesWithValues.addAll(valueToNodeEntry.getValue());
                    if (nodesWithValues.size() < allNodeIds.size()) {
                        String propValue = valueToNodeEntry.getKey();
                        printProperty(propName, propValue, valueToNodeEntry.getValue());
                    }
                }
                // Whatever is left never reported this property at all.
                allNodeIds.removeAll(nodesWithValues);
                if (allNodeIds.size() > 0) {
                    System.out.println("The Property " + propName + " is present in the nodes " + Arrays.toString(nodesWithValues.toArray()) + " but missing from the nodes " + Arrays.toString(allNodeIds.toArray()));
                }
            }
        }
    }
}
public class TransitionsExtractorImpl { /** * TransitionsExtractor */ @ Override public Map < Transition , Collection < TileRef > > getTransitions ( Collection < Media > levels , Media sheetsConfig , Media groupsConfig ) { } }
final Collection < MapTile > mapsSet = new HashSet < > ( levels . size ( ) ) ; for ( final Media level : levels ) { final MapTile map = new MapTileGame ( ) ; map . create ( level , sheetsConfig ) ; final MapTileGroup mapGroup = new MapTileGroupModel ( ) ; mapGroup . loadGroups ( groupsConfig ) ; map . addFeature ( mapGroup ) ; mapsSet . add ( map ) ; } return getTransitions ( mapsSet ) ;
public class RestClientUtil { /** * 发送es restful sql请求 / _ xpack / sql , 获取返回值 , 返回值类型由beanType决定 * @ param beanType * @ param oldPage * @ param < T > * @ return * @ throws ElasticSearchException */ public < T > SQLResult < T > fetchQueryByCursor ( Class < T > beanType , SQLResult < T > oldPage ) throws ElasticSearchException { } }
if ( oldPage . getCursor ( ) == null ) { return null ; } SQLRestResponse result = this . client . executeRequest ( "/_xpack/sql" , new StringBuilder ( ) . append ( "{\"cursor\": \"" ) . append ( oldPage . getCursor ( ) ) . append ( "\"}" ) . toString ( ) , new SQLRestResponseHandler ( ) ) ; SQLResult < T > datas = ResultUtil . buildFetchSQLResult ( result , beanType , oldPage ) ; datas . setClientInterface ( this ) ; return datas ;
public class FunctionMapper { /** * Resolves a Function via prefix and local name . * @ param prefix of the function * @ param localName of the function * @ return an instance of a Method */ public Method resolveFunction ( String prefix , String localName ) { } }
FunctionMapperSpi functionMapperSpi = findFunctionMapperSpi ( prefix ) ; return functionMapperSpi . resolveFunction ( localName ) ;
public class Util { /** * Closes a { @ link Connection } and logs exceptions without throwing . Does * nothing if connection is null . * @ param connection */ static void closeQuietly ( Connection connection ) { } }
try { if ( connection != null && ! connection . isClosed ( ) ) { connection . close ( ) ; log . debug ( "closed {}" , connection ) ; } } catch ( SQLException e ) { log . debug ( e . getMessage ( ) , e ) ; } catch ( RuntimeException e ) { log . debug ( e . getMessage ( ) , e ) ; }
public class SpatialAnchorsAccountsInner {
    /**
     * Get both of the 2 keys of a Spatial Anchors Account.
     *
     * @param resourceGroupName Name of an Azure resource group.
     * @param spatialAnchorsAccountName Name of a Mixed Reality Spatial Anchors Account.
     * @param serviceCallback the async ServiceCallback to handle successful and failed responses.
     * @throws IllegalArgumentException thrown if parameters fail the validation
     * @return the {@link ServiceFuture} object
     */
    public ServiceFuture<SpatialAnchorsAccountKeysInner> getKeysAsync(String resourceGroupName, String spatialAnchorsAccountName, final ServiceCallback<SpatialAnchorsAccountKeysInner> serviceCallback) {
        // Adapt the observable-based call into a ServiceFuture wired to the callback.
        return ServiceFuture.fromResponse(getKeysWithServiceResponseAsync(resourceGroupName, spatialAnchorsAccountName), serviceCallback);
    }
}
public class MarkupSelectorFilter {
    /**
     * Handles text events in the selector filter chain.
     *
     * <p>Decides whether a text node at the given markup level matches this
     * filter, delegating to the next filter in the chain when this one was
     * already satisfied at a previous level.
     *
     * @param blockMatching whether a whole block (vs. a single node) is being matched
     * @param markupLevel the current markup nesting level
     * @param markupBlockIndex the index of the current markup block
     * @return whether the text event is considered matched by the chain
     */
    boolean matchText(final boolean blockMatching, final int markupLevel, final int markupBlockIndex) {
        checkMarkupLevel(markupLevel);
        if (this.markupSelectorItem.anyLevel() || markupLevel == 0
                || (this.prev != null && this.prev.matchedMarkupLevels[markupLevel - 1])) {
            // This text has not matched yet, but might match, so we should check.
            this.matchesThisLevel = this.markupSelectorItem.matchesText(markupBlockIndex, this.markupBlockMatchingCounter);
            if (matchesPreviousOrCurrentLevel(markupLevel)) {
                // This filter was already matched by a previous level (through an "open" event), so just delegate to next.
                if (this.next != null) {
                    return this.next.matchText(blockMatching, markupLevel, markupBlockIndex);
                }
                return (blockMatching ? true : this.matchesThisLevel);
            } else if (this.matchesThisLevel) {
                // This filter was not matched before. So the fact that it matches now means we need to consume it,
                // therefore not delegating.
                return (this.next == null);
            }
        } else if (matchesPreviousOrCurrentLevel(markupLevel)) {
            // This filter was already matched by a previous level (through an "open" event), so just delegate to next.
            if (this.next != null) {
                return this.next.matchText(blockMatching, markupLevel, markupBlockIndex);
            }
            return blockMatching;
        }
        return false;
    }
}
public class PackingPlanBuilder {
    /**
     * Adds an instance of the given component to the given container,
     * assigning it the next free task id and component index, and validating
     * all instance and packing constraints before committing it.
     *
     * @param containerId the container to add the instance to (created if absent)
     * @param componentName the component the new instance belongs to
     * @return this builder, for chaining
     * @throws ConstraintViolationException if an instance or packing constraint is violated
     */
    public PackingPlanBuilder addInstance(Integer containerId, String componentName) throws ConstraintViolationException {
        // create container if not existed
        initContainer(containerId);
        // Task ids are global and monotonically increasing; component indexes are per-component.
        Integer taskId = taskIds.isEmpty() ? 1 : taskIds.last() + 1;
        Integer componentIndex = componentIndexes.containsKey(componentName)
                ? componentIndexes.get(componentName).last() + 1 : 0;
        InstanceId instanceId = new InstanceId(componentName, taskId, componentIndex);
        Resource instanceResource = componentResourceMap.getOrDefault(componentName, defaultInstanceResource);
        Container container = containers.get(containerId);
        PackingPlan.InstancePlan instancePlan = new PackingPlan.InstancePlan(instanceId, instanceResource);
        // Check constraints before mutating the container.
        for (InstanceConstraint constraint : instanceConstraints) {
            constraint.validate(instancePlan);
        }
        for (PackingConstraint constraint : packingConstraints) {
            constraint.validate(container, instancePlan);
        }
        addToContainer(container, instancePlan, this.componentIndexes, this.taskIds);
        LOG.finest(String.format("Added to container %d instance %s", containerId, instanceId));
        return this;
    }
}
public class Format {
    /**
     * Creates an <code>AttributedCharacterIterator</code> for the String
     * <code>s</code>, carrying no attributes.
     *
     * @param s String to create the AttributedCharacterIterator from
     * @return AttributedCharacterIterator wrapping s
     */
    AttributedCharacterIterator createAttributedCharacterIterator(String s) {
        return new AttributedString(s).getIterator();
    }
}
public class MultipleRecipientManager { /** * Returns the address of the multiple recipients service . To obtain such address service * discovery is going to be used on the connected server and if none was found then another * attempt will be tried on the server items . The discovered information is going to be * cached for 24 hours . * @ param connection the connection to use for disco . The connected server is going to be * queried . * @ return the address of the multiple recipients service or < tt > null < / tt > if none was found . * @ throws NoResponseException if there was no response from the server . * @ throws XMPPErrorException * @ throws NotConnectedException * @ throws InterruptedException */ private static DomainBareJid getMultipleRecipientServiceAddress ( XMPPConnection connection ) throws NoResponseException , XMPPErrorException , NotConnectedException , InterruptedException { } }
ServiceDiscoveryManager sdm = ServiceDiscoveryManager . getInstanceFor ( connection ) ; return sdm . findService ( MultipleAddresses . NAMESPACE , true ) ;
public class JSONUtils { /** * Tests if the obj is a javaScript null . */ public static boolean isNull ( Object obj ) { } }
if ( obj instanceof JSONObject ) { return ( ( JSONObject ) obj ) . isNullObject ( ) ; } return JSONNull . getInstance ( ) . equals ( obj ) ;
public class Nengo {
    /**
     * Tries to find the previous nengo in chronological order.
     *
     * <p>Note: if this nengo represents a northern-court nengo of the
     * Nanboku-ch&#333; era (1336-1392), the previous nengo will also be a
     * northern-court one (never a southern-court one). If this nengo is the
     * first of the northern court, the previous one is Genk&#333; (1391).
     *
     * @return previous nengo which is only present if this nengo is not the first nengo
     */
    public Optional<Nengo> findPrevious() {
        if (this.court == COURT_NORTHERN) {
            if (this.index == 0) {
                // First northern-court nengo: predecessor is the official nengo just before Kenmu.
                return Optional.of(OFFICIAL_NENGOS[NENGO_KENMU.index - 1]);
            } else {
                return Optional.of(NORTHERN_NENGOS[this.index - 1]);
            }
        } else if (this.index == 0) {
            // The very first official nengo has no predecessor.
            return Optional.empty();
        } else {
            return Optional.of(OFFICIAL_NENGOS[this.index - 1]);
        }
    }
}
public class SipResourceAdaptor {
    /**
     * (non-Javadoc)
     *
     * Ends the activity in SLEE when the underlying SIP activity no longer
     * exists or is already ending.
     *
     * @see javax.slee.resource.ResourceAdaptor#queryLiveness(javax.slee.resource.ActivityHandle)
     */
    public void queryLiveness(ActivityHandle arg0) {
        final SipActivityHandle handle = (SipActivityHandle) arg0;
        final Wrapper activity = activityManagement.get(handle);
        // A missing or ending wrapper means the activity is dead: tell SLEE to end it.
        if (activity == null || activity.isEnding()) {
            sleeEndpoint.endActivity(handle);
        }
    }
}
public class AWSGlueClient { /** * Retrieves information about a specified partition . * @ param getPartitionRequest * @ return Result of the GetPartition operation returned by the service . * @ throws EntityNotFoundException * A specified entity does not exist * @ throws InvalidInputException * The input provided was not valid . * @ throws InternalServiceException * An internal service error occurred . * @ throws OperationTimeoutException * The operation timed out . * @ throws GlueEncryptionException * An encryption operation failed . * @ sample AWSGlue . GetPartition * @ see < a href = " http : / / docs . aws . amazon . com / goto / WebAPI / glue - 2017-03-31 / GetPartition " target = " _ top " > AWS API * Documentation < / a > */ @ Override public GetPartitionResult getPartition ( GetPartitionRequest request ) { } }
request = beforeClientExecution ( request ) ; return executeGetPartition ( request ) ;
public class StopExecutionRequestMarshaller {
    /**
     * Marshalls the given request's fields (execution ARN, error, cause) into
     * the protocol marshaller.
     *
     * @param stopExecutionRequest the request to marshall, must not be null
     * @param protocolMarshaller the target marshaller
     * @throws SdkClientException when the request is null or marshalling fails
     */
    public void marshall(StopExecutionRequest stopExecutionRequest, ProtocolMarshaller protocolMarshaller) {
        if (stopExecutionRequest == null) {
            throw new SdkClientException("Invalid argument passed to marshall(...)");
        }
        try {
            protocolMarshaller.marshall(stopExecutionRequest.getExecutionArn(), EXECUTIONARN_BINDING);
            protocolMarshaller.marshall(stopExecutionRequest.getError(), ERROR_BINDING);
            protocolMarshaller.marshall(stopExecutionRequest.getCause(), CAUSE_BINDING);
        } catch (Exception e) {
            // Wrap any marshalling failure in the SDK's client exception, preserving the cause.
            throw new SdkClientException("Unable to marshall request to JSON: " + e.getMessage(), e);
        }
    }
}
public class NoOpCipherExecutor {
    /**
     * Gets the shared no-op cipher executor instance, creating it lazily.
     *
     * @return the instance
     */
    static <I, O> CipherExecutor<I, O> getInstance() {
        // Lazy initialization via double-checked locking.
        // NOTE(review): DCL is only safe if INSTANCE is declared volatile; the
        // field is declared elsewhere in this class — confirm it carries the
        // volatile modifier.
        if (INSTANCE == null) {
            synchronized (NoOpCipherExecutor.class) {
                if (INSTANCE == null) {
                    INSTANCE = new NoOpCipherExecutor<>();
                }
            }
        }
        return INSTANCE;
    }
}
public class CommercePriceEntryUtil {
    /**
     * Returns the first commerce price entry in the ordered set where CPInstanceUuid = &#63;.
     *
     * @param CPInstanceUuid the cp instance uuid
     * @param orderByComparator the comparator to order the set by (optionally <code>null</code>)
     * @return the first matching commerce price entry, or <code>null</code> if a matching commerce price entry could not be found
     */
    public static CommercePriceEntry fetchByCPInstanceUuid_First(String CPInstanceUuid, OrderByComparator<CommercePriceEntry> orderByComparator) {
        // Static facade: delegate to the persistence implementation.
        return getPersistence().fetchByCPInstanceUuid_First(CPInstanceUuid, orderByComparator);
    }
}
public class AtomContainer { /** * { @ inheritDoc } */ @ Override public int indexOf ( IAtom atom ) { } }
for ( int i = 0 ; i < atomCount ; i ++ ) { if ( atoms [ i ] . equals ( atom ) ) return i ; } return - 1 ;
public class FloatingLabelWidgetBase {
    /**
     * Specifies a new LabelAnimator to handle calls to show/hide the label.
     *
     * @param labelAnimator LabelAnimator to use; null causes use of the default LabelAnimator
     */
    public void setLabelAnimator(LabelAnimator labelAnimator) {
        if (labelAnimator == null) {
            this.labelAnimator = new DefaultLabelAnimator();
        } else {
            if (this.labelAnimator != null) {
                // Hand the current anchored state over to the incoming animator
                // so the label position does not jump on replacement.
                labelAnimator.setLabelAnchored(getInputWidget(), getFloatingLabel(), this.labelAnimator.isAnchored());
            }
            this.labelAnimator = labelAnimator;
        }
        if (isInEditMode()) {
            // In design/edit mode always show the label floating (not anchored).
            this.labelAnimator.setLabelAnchored(getInputWidget(), getFloatingLabel(), false);
        }
    }
}
public class Cob2Xsd { /** * Apply the lexer to produce a token stream from source . * @ param cleanedCobolSource the source code ( clean outside columns 7 to 72) * @ return an antlr token stream * @ throws RecognizerException if lexer failed to tokenize COBOL source */ public CommonTokenStream lex ( final String cleanedCobolSource ) throws RecognizerException { } }
if ( _log . isDebugEnabled ( ) ) { _log . debug ( "2. Lexing COBOL source code: {}" , cleanedCobolSource ) ; } try { CobolStructureLexer lex = new CobolStructureLexerImpl ( new ANTLRReaderStream ( new StringReader ( cleanedCobolSource ) ) , getErrorHandler ( ) ) ; CommonTokenStream tokens = new CommonTokenStream ( lex ) ; if ( lex . getNumberOfSyntaxErrors ( ) != 0 || tokens == null ) { throw ( new RecognizerException ( "Lexing failed. " + lex . getNumberOfSyntaxErrors ( ) + " syntax errors." + " Last error was " + getErrorHistory ( ) . get ( getErrorHistory ( ) . size ( ) - 1 ) ) ) ; } return tokens ; } catch ( IOException e ) { throw ( new RecognizerException ( e ) ) ; }
public class BaseRenderer { /** * Adds a cell with the specified content to the grid row . * @ param row List row . * @ param label Content for cell . Auto - detects type of content . * @ return Newly created cell . */ public Span addContent ( Row row , String label ) { } }
Span cell = new Span ( ) ; cell . addChild ( CWFUtil . getTextComponent ( label ) ) ; row . addChild ( cell ) ; return cell ;
public class XMemberFeatureCallImpl {
    /**
     * Reflective EMF setter: dispatches the new value to the matching feature
     * accessor, falling through to the superclass for unknown features.
     * <!-- begin-user-doc -->
     * <!-- end-user-doc -->
     * @generated
     */
    @SuppressWarnings("unchecked")
    @Override
    public void eSet(int featureID, Object newValue) {
        // Generated EMF dispatch: do not hand-edit; regenerate from the model instead.
        switch (featureID) {
            case XbasePackage.XMEMBER_FEATURE_CALL__MEMBER_CALL_TARGET:
                setMemberCallTarget((XExpression) newValue);
                return;
            case XbasePackage.XMEMBER_FEATURE_CALL__MEMBER_CALL_ARGUMENTS:
                getMemberCallArguments().clear();
                getMemberCallArguments().addAll((Collection<? extends XExpression>) newValue);
                return;
            case XbasePackage.XMEMBER_FEATURE_CALL__EXPLICIT_OPERATION_CALL:
                setExplicitOperationCall((Boolean) newValue);
                return;
            case XbasePackage.XMEMBER_FEATURE_CALL__EXPLICIT_STATIC:
                setExplicitStatic((Boolean) newValue);
                return;
            case XbasePackage.XMEMBER_FEATURE_CALL__NULL_SAFE:
                setNullSafe((Boolean) newValue);
                return;
            case XbasePackage.XMEMBER_FEATURE_CALL__TYPE_LITERAL:
                setTypeLiteral((Boolean) newValue);
                return;
            case XbasePackage.XMEMBER_FEATURE_CALL__STATIC_WITH_DECLARING_TYPE:
                setStaticWithDeclaringType((Boolean) newValue);
                return;
            case XbasePackage.XMEMBER_FEATURE_CALL__PACKAGE_FRAGMENT:
                setPackageFragment((Boolean) newValue);
                return;
        }
        super.eSet(featureID, newValue);
    }
}
public class CommerceShippingFixedOptionPersistenceImpl { /** * Creates a new commerce shipping fixed option with the primary key . Does not add the commerce shipping fixed option to the database . * @ param commerceShippingFixedOptionId the primary key for the new commerce shipping fixed option * @ return the new commerce shipping fixed option */ @ Override public CommerceShippingFixedOption create ( long commerceShippingFixedOptionId ) { } }
CommerceShippingFixedOption commerceShippingFixedOption = new CommerceShippingFixedOptionImpl ( ) ; commerceShippingFixedOption . setNew ( true ) ; commerceShippingFixedOption . setPrimaryKey ( commerceShippingFixedOptionId ) ; commerceShippingFixedOption . setCompanyId ( companyProvider . getCompanyId ( ) ) ; return commerceShippingFixedOption ;
public class RegionBackendServiceClient { /** * Retrieves the list of regional BackendService resources available to the specified project in * the given region . * < p > Sample code : * < pre > < code > * try ( RegionBackendServiceClient regionBackendServiceClient = RegionBackendServiceClient . create ( ) ) { * ProjectRegionName region = ProjectRegionName . of ( " [ PROJECT ] " , " [ REGION ] " ) ; * for ( BackendService element : regionBackendServiceClient . listRegionBackendServices ( region . toString ( ) ) . iterateAll ( ) ) { * / / doThingsWith ( element ) ; * < / code > < / pre > * @ param region Name of the region scoping this request . * @ throws com . google . api . gax . rpc . ApiException if the remote call fails */ @ BetaApi public final ListRegionBackendServicesPagedResponse listRegionBackendServices ( String region ) { } }
ListRegionBackendServicesHttpRequest request = ListRegionBackendServicesHttpRequest . newBuilder ( ) . setRegion ( region ) . build ( ) ; return listRegionBackendServices ( request ) ;
public class TypeReflector {
    /**
     * Creates an instance of an object type specified by its name.
     *
     * @param name an object type name.
     * @param args arguments for the object constructor.
     * @return the created object instance.
     * @throws Exception when type of instance not found
     * @see #getType(String, String)
     * @see #createInstanceByType(Class, Object...)
     */
    public static Object createInstance(String name, Object... args) throws Exception {
        // Delegate to the two-name overload with no library/module qualifier.
        return createInstance(name, (String) null, args);
    }
}
public class SimonServletFilter {
    /**
     * Determines whether the request is over the threshold - with all incoming parameters this method can be
     * very flexible. Default implementation just compares the actual requestNanoTime with
     * {@link #getThreshold(javax.servlet.http.HttpServletRequest)} (which by default returns value configured
     * in {@code web.xml}).
     *
     * @param request HTTP servlet request
     * @param requestNanoTime actual HTTP request nano time
     * @param splits all splits started for the request
     * @return {@code true}, if request should be reported as over threshold
     */
    protected boolean shouldBeReported(HttpServletRequest request, long requestNanoTime, List<Split> splits) {
        // Strictly greater than the per-request threshold triggers reporting.
        return requestNanoTime > getThreshold(request);
    }
}
public class SVMLightClassifierFactory { /** * Converts the weight Counter to be from indexed , svm _ light format , to a format * we can use in our LinearClassifier . */ private ClassicCounter < Pair < F , L > > convertWeights ( ClassicCounter < Integer > weights , Index < F > featureIndex , Index < L > labelIndex , boolean multiclass ) { } }
return multiclass ? convertSVMStructWeights ( weights , featureIndex , labelIndex ) : convertSVMLightWeights ( weights , featureIndex , labelIndex ) ;
public class LuceneGazetteer {
    /**
     * Retrieves and sets the parents of the provided children.
     * Looks each parent up in the Lucene index by its geonameID; any parent whose own
     * ancestry is still unresolved is collected and resolved by a recursive call.
     *
     * @param childMap the map of parent geonameID to the set of children that belong to it
     * @throws IOException if an error occurs during parent resolution
     */
    private void resolveParents(final Map<Integer, Set<GeoName>> childMap) throws IOException {
        // Parents found in this pass, keyed by their geonameID.
        Map<Integer, GeoName> parentMap = new HashMap<Integer, GeoName>();
        // Parents whose own ancestry is unresolved: grandparent geonameID -> parents needing it.
        Map<Integer, Set<GeoName>> grandParentMap = new HashMap<Integer, Set<GeoName>>();
        for (Integer parentId : childMap.keySet()) {
            // Lucene query used to look for exact match on the "geonameID" field
            // (a degenerate [parentId, parentId] numeric range, inclusive on both ends).
            Query q = NumericRangeQuery.newIntRange(GEONAME_ID.key(), parentId, parentId, true, true);
            // Only the top hit is needed; POPULATION_SORT breaks any ties.
            TopDocs results = indexSearcher.search(q, null, 1, POPULATION_SORT);
            if (results.scoreDocs.length > 0) {
                Document doc = indexSearcher.doc(results.scoreDocs[0].doc);
                GeoName parent = BasicGeoName.parseFromGeoNamesRecord(doc.get(GEONAME.key()), doc.get(PREFERRED_NAME.key()));
                parentMap.put(parent.getGeonameID(), parent);
                if (!parent.isAncestryResolved()) {
                    // This parent itself needs a parent; queue it under its grandparent's ID.
                    Integer grandParentId = PARENT_ID.getValue(doc);
                    if (grandParentId != null) {
                        Set<GeoName> geos = grandParentMap.get(grandParentId);
                        if (geos == null) {
                            geos = new HashSet<GeoName>();
                            grandParentMap.put(grandParentId, geos);
                        }
                        geos.add(parent);
                    }
                }
            } else {
                LOG.error("Unable to find parent GeoName [{}]", parentId);
            }
        }
        // find all parents of the parents (recurses until no unresolved ancestry remains)
        if (!grandParentMap.isEmpty()) {
            resolveParents(grandParentMap);
        }
        // set parents of children; parents that could not be found are logged and skipped
        for (Integer parentId : childMap.keySet()) {
            GeoName parent = parentMap.get(parentId);
            if (parent == null) {
                LOG.info("Unable to find parent with ID [{}]", parentId);
                continue;
            }
            for (GeoName child : childMap.get(parentId)) {
                child.setParent(parent);
            }
        }
    }
}
public class Single { /** * Concatenates a sequence of SingleSource eagerly into a single stream of values . * Eager concatenation means that once a subscriber subscribes , this operator subscribes to all of the * source SingleSources . The operator buffers the value emitted by these SingleSources and then drains them * in order , each one after the previous one completes . * < dl > * < dt > < b > Backpressure : < / b > < / dt > * < dd > The operator honors backpressure from downstream . < / dd > * < dt > < b > Scheduler : < / b > < / dt > * < dd > This method does not operate by default on a particular { @ link Scheduler } . < / dd > * < / dl > * @ param < T > the value type * @ param sources a sequence of Single that need to be eagerly concatenated * @ return the new Flowable instance with the specified concatenation behavior */ @ BackpressureSupport ( BackpressureKind . FULL ) @ CheckReturnValue @ SchedulerSupport ( SchedulerSupport . NONE ) public static < T > Flowable < T > concatArrayEager ( SingleSource < ? extends T > ... sources ) { } }
return Flowable . fromArray ( sources ) . concatMapEager ( SingleInternalHelper . < T > toFlowable ( ) ) ;
public class MinioClient {
    /**
     * Removes multiple objects from a bucket.
     *
     * <p><b>Example:</b><br>
     * <pre>{@code // Create object list for removal.
     * List<String> objectNames = new LinkedList<String>();
     * objectNames.add("my-objectname1");
     * objectNames.add("my-objectname2");
     * objectNames.add("my-objectname3");
     * for (Result<DeleteError> errorResult : minioClient.removeObjects("my-bucketname", objectNames)) {
     *   DeleteError error = errorResult.get();
     *   System.out.println("Failed to remove '" + error.objectName() + "'. Error: " + error.message());
     * } }</pre>
     *
     * @param bucketName Bucket name.
     * @param objectNames List of Object names in the bucket.
     */
    public Iterable<Result<DeleteError>> removeObjects(final String bucketName, final Iterable<String> objectNames) {
        // Lazy iterable: removal only happens as the caller iterates. Objects are deleted in
        // batches of up to 1000 (the multi-delete API limit used below), and only the errors
        // from each batch are surfaced as results.
        return new Iterable<Result<DeleteError>>() {
            @Override
            public Iterator<Result<DeleteError>> iterator() {
                return new Iterator<Result<DeleteError>>() {
                    // A client-side failure (network, parsing, ...) wrapped for the caller.
                    private Result<DeleteError> error;
                    // Iterator over server-reported per-object delete errors of the current batch.
                    private Iterator<DeleteError> errorIterator;
                    private boolean completed = false;
                    private Iterator<String> objectNameIter = objectNames.iterator();

                    // Sends the next batch of up to 1000 names and captures its errors.
                    // On exception, stores it in `error` and installs an empty error iterator.
                    private synchronized void populate() {
                        List<DeleteError> errorList = null;
                        try {
                            List<DeleteObject> objectList = new LinkedList<DeleteObject>();
                            int i = 0;
                            while (objectNameIter.hasNext() && i < 1000) {
                                objectList.add(new DeleteObject(objectNameIter.next()));
                                i++;
                            }
                            if (i > 0) {
                                errorList = removeObject(bucketName, objectList);
                            }
                        } catch (InvalidBucketNameException | NoSuchAlgorithmException | InsufficientDataException | IOException
                                 | InvalidKeyException | NoResponseException | XmlPullParserException | ErrorResponseException
                                 | InternalException e) {
                            this.error = new Result<>(null, e);
                        } finally {
                            // Always leave errorIterator non-null so hasNext()/next() can progress.
                            if (errorList != null) {
                                this.errorIterator = errorList.iterator();
                            } else {
                                this.errorIterator = new LinkedList<DeleteError>().iterator();
                            }
                        }
                    }

                    @Override
                    public boolean hasNext() {
                        if (this.completed) {
                            return false;
                        }
                        // First call: trigger the first batch.
                        if (this.error == null && this.errorIterator == null) {
                            populate();
                        }
                        // Current batch's errors exhausted and no failure yet: fetch next batch.
                        if (this.error == null && this.errorIterator != null && !this.errorIterator.hasNext()) {
                            populate();
                        }
                        if (this.error != null) {
                            return true;
                        }
                        if (this.errorIterator.hasNext()) {
                            return true;
                        }
                        this.completed = true;
                        return false;
                    }

                    @Override
                    public Result<DeleteError> next() {
                        if (this.completed) {
                            throw new NoSuchElementException();
                        }
                        // Mirror hasNext()'s batching so next() also works without a prior hasNext().
                        if (this.error == null && this.errorIterator == null) {
                            populate();
                        }
                        if (this.error == null && this.errorIterator != null && !this.errorIterator.hasNext()) {
                            populate();
                        }
                        // A client-side error terminates the iteration after being returned once.
                        if (this.error != null) {
                            this.completed = true;
                            return this.error;
                        }
                        if (this.errorIterator.hasNext()) {
                            return new Result<>(this.errorIterator.next(), null);
                        }
                        this.completed = true;
                        throw new NoSuchElementException();
                    }

                    @Override
                    public void remove() {
                        throw new UnsupportedOperationException();
                    }
                };
            }
        };
    }
}
public class CurrentThreadToServletContext { /** * - - - - - Public Methods */ Object getFallbackFactory ( FactoryFinderInstance brokenFactoryManager , String factoryName ) { } }
Object result = null ; ClassLoader cl = getClassLoader ( ) ; for ( Map . Entry < FactoryManagerCacheKey , FactoryFinderInstance > cur : applicationMap . entrySet ( ) ) { if ( cur . getKey ( ) . getClassLoader ( ) . equals ( cl ) && ! cur . getValue ( ) . equals ( brokenFactoryManager ) ) { result = cur . getValue ( ) . getFactory ( factoryName ) ; if ( null != result ) { break ; } } } return result ;
public class ObjectTypeConfigurationRegistry {
    /**
     * Returns the ObjectTypeConfiguration object for the given object or
     * creates a new one if none is found in the cache.
     */
    public ObjectTypeConf getObjectTypeConf(EntryPointId entrypoint, Object object) {
        // first see if it's a ClassObjectTypeConf
        // Cache key depends on the object's kind: a fixed match type for activations,
        // the template name for facts, otherwise the object's own class.
        Object key;
        if (object instanceof Activation) {
            key = ClassObjectType.Match_ObjectType.getClassType();
        } else if (object instanceof Fact) {
            key = ((Fact) object).getFactTemplate().getName();
        } else {
            key = object.getClass();
        }
        ObjectTypeConf objectTypeConf = this.typeConfMap.get(key);
        // it doesn't exist, so create it.
        if (objectTypeConf == null) {
            if (object instanceof Fact) {
                objectTypeConf = new FactTemplateTypeConf(entrypoint, ((Fact) object).getFactTemplate(), this.kBase);
            } else {
                objectTypeConf = new ClassObjectTypeConf(entrypoint, (Class<?>) key, this.kBase);
            }
            // putIfAbsent keeps the registration atomic under concurrent callers.
            ObjectTypeConf existing = this.typeConfMap.putIfAbsent(key, objectTypeConf);
            if (existing != null) {
                // Raced, take the (now) existing.
                objectTypeConf = existing;
            }
        }
        return objectTypeConf;
    }
}
public class MiniSat { /** * Returns a new MiniCard solver with a given configuration . * @ param f the formula factory * @ param config the configuration * @ return the solver */ public static MiniSat miniCard ( final FormulaFactory f , final MiniSatConfig config ) { } }
return new MiniSat ( f , SolverStyle . MINICARD , config , null ) ;
public class vpnvserver_authenticationradiuspolicy_binding { /** * Use this API to fetch vpnvserver _ authenticationradiuspolicy _ binding resources of given name . */ public static vpnvserver_authenticationradiuspolicy_binding [ ] get ( nitro_service service , String name ) throws Exception { } }
vpnvserver_authenticationradiuspolicy_binding obj = new vpnvserver_authenticationradiuspolicy_binding ( ) ; obj . set_name ( name ) ; vpnvserver_authenticationradiuspolicy_binding response [ ] = ( vpnvserver_authenticationradiuspolicy_binding [ ] ) obj . get_resources ( service ) ; return response ;
public class ConnectionPool { /** * Set the read - only state of the connection . If the connection throws an * exception , record this , and don ' t attempt to change the state in future */ private void setConnectionReadOnly ( Connection connection , boolean readOnly ) { } }
if ( supportsReadOnly ) { try { connection . setReadOnly ( readOnly ) ; } catch ( Throwable th ) { logger . info ( "Read-only connections not supported ({})" , th . getMessage ( ) ) ; supportsReadOnly = false ; } }
public class BinaryDataSegment {
    /**
     * {@inheritDoc}
     *
     * Resizes the internal data buffer to the number of bytes remaining in {@code src},
     * then copies {@code len} bytes from {@code src} into it.
     *
     * @param src the buffer to read from
     * @param len the number of bytes to transfer
     * @return the limit of the internal buffer after the transfer
     */
    public final int deserialize(final ByteBuffer src, final int len) {
        // NOTE(review): the buffer is sized from src.remaining() but only `len` bytes are
        // transferred — presumably callers pass len == src.remaining(); confirm.
        resizeBuffer(src.remaining(), false);
        // Reset position so the transfer starts at the beginning of the buffer.
        dataBuffer.rewind();
        transferBytes(src, dataBuffer, len);
        return dataBuffer.limit();
    }
}
public class FamilyFilterAdapter { /** * { @ inheritDoc } */ @ Override public FilterSupportStatus isFilterSupported ( FilterAdapterContext context , FamilyFilter filter ) { } }
ByteArrayComparable comparator = filter . getComparator ( ) ; if ( ! ( comparator instanceof RegexStringComparator ) && ! ( comparator instanceof BinaryComparator ) ) { return FilterSupportStatus . newNotSupported ( comparator . getClass ( ) . getName ( ) + " comparator is not supported" ) ; } if ( filter . getOperator ( ) != CompareFilter . CompareOp . EQUAL ) { return FilterSupportStatus . newNotSupported ( filter . getOperator ( ) + " operator is not supported" ) ; } return FilterSupportStatus . SUPPORTED ;
public class Histogram { /** * Finds the index of the bucket in which the given value should be . */ private int bucketIndexFor ( final int value ) { } }
if ( value < cutoff ) { return value / interval ; } int bucket = num_linear_buckets // Skip all linear buckets . // And find which bucket the rest ( after ` cutoff ' ) should be in . // Reminder : the first exponential bucket ends at 2 ^ exp _ bucket _ shift . + log2rounddown ( ( value - cutoff ) >> exp_bucket_shift ) ; if ( bucket >= buckets . length ) { return buckets . length - 1 ; } return bucket ;
public class HELM1Utils { /** * method to generate a canonical HELM 1 connection section * @ param convertsortedIdstoIds Map of old ids with the equivalent new ids * @ return second section of HELM * @ throws HELM1ConverterException */ private static String setCanonicalHELMSecondSection ( Map < String , String > convertsortedIdstoIds , List < ConnectionNotation > connectionNotations ) throws HELM1ConverterException { } }
StringBuilder notation = new StringBuilder ( ) ; for ( ConnectionNotation connectionNotation : connectionNotations ) { /* canonicalize connection */ /* change the id ' s of the polymers to the sorted ids */ List < String > connections = new ArrayList < String > ( ) ; String source = connectionNotation . getSourceId ( ) . getId ( ) ; String target = connectionNotation . getTargetId ( ) . getId ( ) ; /* pairs will be not shown */ if ( ! ( connectionNotation . toHELM ( ) . equals ( "" ) ) ) { connections . add ( convertConnection ( connectionNotation . toHELM ( ) , source , target , convertsortedIdstoIds ) ) ; connections . add ( convertConnection ( connectionNotation . toReverseHELM ( ) , source , target , convertsortedIdstoIds ) ) ; Collections . sort ( connections ) ; notation . append ( connections . get ( 0 ) + "|" ) ; } } if ( notation . length ( ) > 1 ) { notation . setLength ( notation . length ( ) - 1 ) ; } return notation . toString ( ) ;
public class Tools { /** * 文本分词 * @ param text * @ return */ public static List < Word > getWords ( String text ) { } }
List < Word > result = new ArrayList < > ( ) ; List < Word > words = WordParser . parse ( text ) ; for ( Word word : words ) { result . add ( word ) ; } return result ;
public class AmazonSimpleEmailServiceClient { /** * Returns a list of sending authorization policies that are attached to the given identity ( an email address or a * domain ) . This API returns only a list . If you want the actual policy content , you can use * < code > GetIdentityPolicies < / code > . * < note > * This API is for the identity owner only . If you have not verified the identity , this API will return an error . * < / note > * Sending authorization is a feature that enables an identity owner to authorize other senders to use its * identities . For information about using sending authorization , see the < a * href = " http : / / docs . aws . amazon . com / ses / latest / DeveloperGuide / sending - authorization . html " > Amazon SES Developer * Guide < / a > . * You can execute this operation no more than once per second . * @ param listIdentityPoliciesRequest * Represents a request to return a list of sending authorization policies that are attached to an identity . * Sending authorization is an Amazon SES feature that enables you to authorize other senders to use your * identities . For information , see the < a * href = " http : / / docs . aws . amazon . com / ses / latest / DeveloperGuide / sending - authorization . html " > Amazon SES * Developer Guide < / a > . * @ return Result of the ListIdentityPolicies operation returned by the service . * @ sample AmazonSimpleEmailService . ListIdentityPolicies * @ see < a href = " http : / / docs . aws . amazon . com / goto / WebAPI / email - 2010-12-01 / ListIdentityPolicies " target = " _ top " > AWS API * Documentation < / a > */ @ Override public ListIdentityPoliciesResult listIdentityPolicies ( ListIdentityPoliciesRequest request ) { } }
request = beforeClientExecution ( request ) ; return executeListIdentityPolicies ( request ) ;
public class RuleEvaluator {
    /**
     * Evaluates the matching rules against the request and produces an authorization decision.
     * An explicit deny from any rule short-circuits the evaluation; otherwise a single grant
     * from any rule is sufficient.
     *
     * @param resource resource
     * @param subject subject
     * @param action action
     * @param environment environment
     * @param matchingRules contexts
     * @return decision
     */
    private Decision internalEvaluate(Map<String, String> resource, Subject subject, String action, Set<Attribute> environment, List<AclRule> matchingRules) {
        // Timestamp for reporting the evaluation duration in every decision.
        long start = System.currentTimeMillis();
        // No rule matched the subject/environment: reject immediately.
        if (matchingRules.size() < 1) {
            return authorize(false, "No context matches subject or environment", Explanation.Code.REJECTED_NO_SUBJECT_OR_ENV_FOUND, resource, subject, action, environment, System.currentTimeMillis() - start);
        }
        // Validate the resource definition: no null map, no null keys or values.
        if (resource == null) {
            throw new IllegalArgumentException("Resource does not identify any resource because it's an empty resource property or null.");
        } else {
            for (Map.Entry<String, String> entry : resource.entrySet()) {
                if (entry.getKey() == null) {
                    throw new IllegalArgumentException("Resource definition cannot contain null property name.");
                }
                if (entry.getValue() == null) {
                    throw new IllegalArgumentException("Resource definition cannot contain null value. Corresponding key: " + entry.getKey());
                }
            }
        }
        if (subject == null) {
            throw new IllegalArgumentException("Invalid subject, subject is null.");
        }
        // A missing action is a rejection, not an argument error.
        if (action == null || action.length() <= 0) {
            return authorize(false, "No action provided.", Explanation.Code.REJECTED_NO_ACTION_PROVIDED, resource, subject, action, environment, System.currentTimeMillis() - start);
        }
        // environment can be null.
        if (environment == null) {
            environment = Collections.emptySet();
        }
        // contextDecision: the decision backing a grant; lastDecision: the most recent rule decision.
        ContextDecision contextDecision = null;
        ContextDecision lastDecision = null;
        // long contextIncludeStart = System.currentTimeMillis();
        boolean granted = false;
        boolean denied = false;
        for (AclRule rule : matchingRules) {
            final ContextDecision includes = ruleIncludesResourceAction(rule, resource, action);
            // An explicit deny wins immediately over any grant seen so far.
            if (Explanation.Code.REJECTED_DENIED == includes.getCode()) {
                contextDecision = includes;
                denied = true;
                return createAuthorize(false, contextDecision, resource, subject, action, environment, System.currentTimeMillis() - start);
            } else if (includes.granted()) {
                // Record the grant but keep scanning: a later rule may still deny.
                contextDecision = includes;
                granted = true;
            }
            lastDecision = includes;
        }
        if (granted) {
            return createAuthorize(true, contextDecision, resource, subject, action, environment, System.currentTimeMillis() - start);
        }
        // No grant and no deny: report either the last rule's decision or a generic mismatch.
        if (lastDecision == null) {
            return authorize(false, "No resource or action matched.", Explanation.Code.REJECTED_NO_RESOURCE_OR_ACTION_MATCH, resource, subject, action, environment, System.currentTimeMillis() - start);
        } else {
            return createAuthorize(false, lastDecision, resource, subject, action, environment, System.currentTimeMillis() - start);
        }
    }
}
public class ExtensionHttpSessions { /** * Builds and returns a list of http sessions that correspond to a given context . * @ param context the context * @ return the http sessions for context */ public List < HttpSession > getHttpSessionsForContext ( Context context ) { } }
List < HttpSession > sessions = new LinkedList < > ( ) ; if ( this . sessions == null ) { return sessions ; } synchronized ( sessionLock ) { for ( Entry < String , HttpSessionsSite > e : this . sessions . entrySet ( ) ) { String siteName = e . getKey ( ) ; siteName = "http://" + siteName ; if ( context . isInContext ( siteName ) ) sessions . addAll ( e . getValue ( ) . getHttpSessions ( ) ) ; } } return sessions ;
public class RoboconfReturnListener { /** * / * ( non - Javadoc ) * @ see com . rabbitmq . client . ReturnListener * # handleReturn ( int , java . lang . String , java . lang . String , java . lang . String , com . rabbitmq . client . AMQP . BasicProperties , byte [ ] ) */ @ Override public void handleReturn ( int replyCode , String replyText , String exchange , String routingKey , BasicProperties properties , byte [ ] body ) throws IOException { } }
String messageType = "undetermined" ; try { Message msg = SerializationUtils . deserializeObject ( body ) ; messageType = msg . getClass ( ) . getName ( ) ; } catch ( Exception e ) { this . logger . severe ( "Failed to deserialize a message object." ) ; Utils . logException ( this . logger , e ) ; } if ( this . logger . isLoggable ( Level . WARNING ) ) { final StringBuilder sb = new StringBuilder ( ) ; sb . append ( "A message sent by a RabbitMQ client was not received by any queue.\n" ) ; sb . append ( "Message type: " + messageType + '\n' ) ; sb . append ( "Routing key: " + routingKey + '\n' ) ; sb . append ( "Reason: " + replyText ) ; this . logger . warning ( sb . toString ( ) ) ; }
public class StreamReducer { /** * Creates a new instance of StreamReducer * @ param keyComparator comparator for compare keys */ public static < K , O , A > StreamReducer < K , O , A > create ( Comparator < K > keyComparator ) { } }
return new StreamReducer < > ( keyComparator ) ;
public class AbstractCentralAuthenticationService { /** * Verify the ticket id received is actually legitimate * before contacting downstream systems to find and process it . * @ param ticketId the ticket id * @ return true / false */ protected boolean isTicketAuthenticityVerified ( final String ticketId ) { } }
if ( this . cipherExecutor != null ) { LOGGER . trace ( "Attempting to decode service ticket [{}] to verify authenticity" , ticketId ) ; return ! StringUtils . isEmpty ( this . cipherExecutor . decode ( ticketId ) ) ; } return ! StringUtils . isEmpty ( ticketId ) ;
public class Boot { /** * Replies the name of the program . * @ return the name of the program . */ public static String getProgramName ( ) { } }
String programName = JanusConfig . getSystemProperty ( JanusConfig . JANUS_PROGRAM_NAME , null ) ; if ( Strings . isNullOrEmpty ( programName ) ) { programName = JanusConfig . JANUS_PROGRAM_NAME_VALUE ; } return programName ;