signature
stringlengths
43
39.1k
implementation
stringlengths
0
450k
public class LocalDateTime { /** * Calculates the amount of time until another date - time in terms of the specified unit . * This calculates the amount of time between two { @ code LocalDateTime } * objects in terms of a single { @ code TemporalUnit } . * The start and end points are { @ code this } and the specified date - time . * The result will be negative if the end is before the start . * The { @ code Temporal } passed to this method is converted to a * { @ code LocalDateTime } using { @ link # from ( TemporalAccessor ) } . * For example , the amount in days between two date - times can be calculated * using { @ code startDateTime . until ( endDateTime , DAYS ) } . * The calculation returns a whole number , representing the number of * complete units between the two date - times . * For example , the amount in months between 2012-06-15T00:00 and 2012-08-14T23:59 * will only be one month as it is one minute short of two months . * There are two equivalent ways of using this method . * The first is to invoke this method . * The second is to use { @ link TemporalUnit # between ( Temporal , Temporal ) } : * < pre > * / / these two lines are equivalent * amount = start . until ( end , MONTHS ) ; * amount = MONTHS . between ( start , end ) ; * < / pre > * The choice should be made based on which makes the code more readable . * The calculation is implemented in this method for { @ link ChronoUnit } . * The units { @ code NANOS } , { @ code MICROS } , { @ code MILLIS } , { @ code SECONDS } , * { @ code MINUTES } , { @ code HOURS } and { @ code HALF _ DAYS } , { @ code DAYS } , * { @ code WEEKS } , { @ code MONTHS } , { @ code YEARS } , { @ code DECADES } , * { @ code CENTURIES } , { @ code MILLENNIA } and { @ code ERAS } are supported . * Other { @ code ChronoUnit } values will throw an exception . * If the unit is not a { @ code ChronoUnit } , then the result of this method * is obtained by invoking { @ code TemporalUnit . 
between ( Temporal , Temporal ) } * passing { @ code this } as the first argument and the converted input temporal * as the second argument . * This instance is immutable and unaffected by this method call . * @ param endExclusive the end date , exclusive , which is converted to a { @ code LocalDateTime } , not null * @ param unit the unit to measure the amount in , not null * @ return the amount of time between this date - time and the end date - time * @ throws DateTimeException if the amount cannot be calculated , or the end * temporal cannot be converted to a { @ code LocalDateTime } * @ throws UnsupportedTemporalTypeException if the unit is not supported * @ throws ArithmeticException if numeric overflow occurs */ @ Override public long until ( Temporal endExclusive , TemporalUnit unit ) { } }
LocalDateTime end = LocalDateTime . from ( endExclusive ) ; if ( unit instanceof ChronoUnit ) { if ( unit . isTimeBased ( ) ) { long amount = date . daysUntil ( end . date ) ; if ( amount == 0 ) { return time . until ( end . time , unit ) ; } long timePart = end . time . toNanoOfDay ( ) - time . toNanoOfDay ( ) ; if ( amount > 0 ) { amount -- ; // safe timePart += NANOS_PER_DAY ; // safe } else { amount ++ ; // safe timePart -= NANOS_PER_DAY ; // safe } switch ( ( ChronoUnit ) unit ) { case NANOS : amount = Math . multiplyExact ( amount , NANOS_PER_DAY ) ; break ; case MICROS : amount = Math . multiplyExact ( amount , MICROS_PER_DAY ) ; timePart = timePart / 1000 ; break ; case MILLIS : amount = Math . multiplyExact ( amount , MILLIS_PER_DAY ) ; timePart = timePart / 1_000_000 ; break ; case SECONDS : amount = Math . multiplyExact ( amount , SECONDS_PER_DAY ) ; timePart = timePart / NANOS_PER_SECOND ; break ; case MINUTES : amount = Math . multiplyExact ( amount , MINUTES_PER_DAY ) ; timePart = timePart / NANOS_PER_MINUTE ; break ; case HOURS : amount = Math . multiplyExact ( amount , HOURS_PER_DAY ) ; timePart = timePart / NANOS_PER_HOUR ; break ; case HALF_DAYS : amount = Math . multiplyExact ( amount , 2 ) ; timePart = timePart / ( NANOS_PER_HOUR * 12 ) ; break ; } return Math . addExact ( amount , timePart ) ; } LocalDate endDate = end . date ; if ( endDate . isAfter ( date ) && end . time . isBefore ( time ) ) { endDate = endDate . minusDays ( 1 ) ; } else if ( endDate . isBefore ( date ) && end . time . isAfter ( time ) ) { endDate = endDate . plusDays ( 1 ) ; } return date . until ( endDate , unit ) ; } return unit . between ( this , end ) ;
public class CreateReplicationGroupRequest { /** * A list of EC2 Availability Zones in which the replication group ' s clusters are created . The order of the * Availability Zones in the list is the order in which clusters are allocated . The primary cluster is created in * the first AZ in the list . * This parameter is not used if there is more than one node group ( shard ) . You should use * < code > NodeGroupConfiguration < / code > instead . * < note > * If you are creating your replication group in an Amazon VPC ( recommended ) , you can only locate clusters in * Availability Zones associated with the subnets in the selected subnet group . * The number of Availability Zones listed must equal the value of < code > NumCacheClusters < / code > . * < / note > * Default : system chosen Availability Zones . * @ param preferredCacheClusterAZs * A list of EC2 Availability Zones in which the replication group ' s clusters are created . The order of the * Availability Zones in the list is the order in which clusters are allocated . The primary cluster is * created in the first AZ in the list . < / p > * This parameter is not used if there is more than one node group ( shard ) . You should use * < code > NodeGroupConfiguration < / code > instead . * < note > * If you are creating your replication group in an Amazon VPC ( recommended ) , you can only locate clusters in * Availability Zones associated with the subnets in the selected subnet group . * The number of Availability Zones listed must equal the value of < code > NumCacheClusters < / code > . * < / note > * Default : system chosen Availability Zones . */ public void setPreferredCacheClusterAZs ( java . util . Collection < String > preferredCacheClusterAZs ) { } }
if ( preferredCacheClusterAZs == null ) { this . preferredCacheClusterAZs = null ; return ; } this . preferredCacheClusterAZs = new com . amazonaws . internal . SdkInternalList < String > ( preferredCacheClusterAZs ) ;
public class DirContextAdapter { /** * Collect all modifications for the changed attribute . If no changes have * been made , return immediately . If modifications have been made , and the * original size as well as the updated size of the attribute is 1 , replace * the attribute . If the size of the updated attribute is 0 , remove the * attribute . Otherwise , the attribute is a multi - value attribute ; if it ' s * an ordered one it should be replaced in its entirety to preserve the new * ordering , if not all modifications to the original value ( removals and * additions ) will be collected individually . * @ param changedAttr the value of the changed attribute . * @ param modificationList the list in which to add the modifications . * @ throws NamingException if thrown by called Attribute methods . */ private void collectModifications ( NameAwareAttribute changedAttr , List < ModificationItem > modificationList ) throws NamingException { } }
NameAwareAttribute currentAttribute = originalAttrs . get ( changedAttr . getID ( ) ) ; if ( currentAttribute != null && changedAttr . hasValuesAsNames ( ) ) { try { currentAttribute . initValuesAsNames ( ) ; } catch ( IllegalArgumentException e ) { log . warn ( "Incompatible attributes; changed attribute has Name values but " + "original cannot be converted to this" ) ; } } if ( changedAttr . equals ( currentAttribute ) ) { // No changes return ; } else if ( currentAttribute != null && currentAttribute . size ( ) == 1 && changedAttr . size ( ) == 1 ) { // Replace single - vale attribute . modificationList . add ( new ModificationItem ( DirContext . REPLACE_ATTRIBUTE , changedAttr ) ) ; } else if ( changedAttr . size ( ) == 0 && currentAttribute != null ) { // Attribute has been removed . modificationList . add ( new ModificationItem ( DirContext . REMOVE_ATTRIBUTE , changedAttr ) ) ; } else if ( ( currentAttribute == null || currentAttribute . size ( ) == 0 ) && changedAttr . size ( ) > 0 ) { // Attribute has been added . modificationList . add ( new ModificationItem ( DirContext . ADD_ATTRIBUTE , changedAttr ) ) ; } else if ( changedAttr . size ( ) > 0 && changedAttr . isOrdered ( ) ) { // This is a multivalue attribute and it is ordered - the original // value should be replaced with the new values so that the ordering // is preserved . modificationList . add ( new ModificationItem ( DirContext . REPLACE_ATTRIBUTE , changedAttr ) ) ; } else if ( changedAttr . size ( ) > 0 ) { // Change of multivalue Attribute . Collect additions and removals // individually . List < ModificationItem > myModifications = new LinkedList < ModificationItem > ( ) ; collectModifications ( currentAttribute , changedAttr , myModifications ) ; if ( myModifications . isEmpty ( ) ) { // This means that the attributes are not equal , but the // actual values are the same - thus the order must have // changed . This should result in a REPLACE _ ATTRIBUTE operation . myModifications . 
add ( new ModificationItem ( DirContext . REPLACE_ATTRIBUTE , changedAttr ) ) ; } modificationList . addAll ( myModifications ) ; }
public class UACHandler { /** * Generating an ACK to a 2xx response is the same as any other subsequent request . However , a * subsequent request is generated in the context of what is known as a Dialog and since we * currently are completely stateless , this is not necessarily an easy thing to achieve but for * now we will ignore many of the details . * The general idea is this though : * < ul > * < li > The sub - sequent request is going to be sent to where ever the Contact header of the * response is pointing to . This is known as the remote - target . < / li > * < li > CSeq is incremented by one EXCEPT for ACK ' s , which will have the same sequence number as * what it is " ack : ing " . The method of the CSeq is " ACK " though . < / li > * < li > Call - ID has to be the same < / li > * < li > The remote and local tags has to be correctly preserved on the To - and From - headers < / li > * < li > < / li > * < li > < / li > * < / ul > * @ param response * @ return */ private SipRequest generateAck ( final SipResponse response ) { } }
final ContactHeader contact = response . getContactHeader ( ) ; final SipURI requestURI = ( SipURI ) contact . getAddress ( ) . getURI ( ) ; final ToHeader to = response . getToHeader ( ) ; final FromHeader from = response . getFromHeader ( ) ; // The contact of the response is where the remote party wishes // to receive future request . Since an ACK is a " future " , or sub - sequent , request , // the request - uri of the ACK has to be whatever is in the // contact header of the response . // Since this is an ACK , the cseq should have the same cseq number as the response , // i . e . , the same as the original INVITE that we are ACK : ing . final CSeqHeader cseq = CSeqHeader . with ( ) . cseq ( response . getCSeqHeader ( ) . getSeqNumber ( ) ) . method ( "ACK" ) . build ( ) ; final CallIdHeader callId = response . getCallIDHeader ( ) ; // If there are Record - Route headers in the response , they must be // copied over as well otherwise the ACK will not go the correct // path through the network . // TODO // we also have to create a new Via header and as always , when creating // via header we need to fill out which ip , port and transport we are // coming in over . In SIP , unlike many other protocols , we can use // any transport protocol and it can actually change from message // to message but in this simple example we will just use the // same last time so we will only have to generate a new branch id final ViaHeader via = response . getViaHeader ( ) . clone ( ) ; via . setBranch ( ViaHeader . generateBranch ( ) ) ; // now we have all the pieces so let ' s put it together final SipRequest . Builder builder = SipRequest . ack ( requestURI ) ; builder . from ( from ) ; builder . to ( to ) ; builder . callId ( callId ) ; builder . cseq ( cseq ) ; builder . via ( via ) ; return builder . build ( ) ;
public class ManagedPropertyPersistenceHelper { /** * Generate param parser . * @ param context the context * @ param methodName the method name * @ param parameterTypeName the parameter type name * @ param persistType the persist type */ public static void generateParamParser ( BindTypeContext context , String methodName , TypeName parameterTypeName , PersistType persistType ) { } }
methodName = SQLiteDaoDefinition . PARAM_PARSER_PREFIX + methodName ; MethodSpec . Builder methodBuilder = MethodSpec . methodBuilder ( methodName ) . addJavadoc ( "for param $L parsing\n" , methodName ) . returns ( parameterTypeName ) ; methodBuilder . addModifiers ( context . modifiers ) ; switch ( persistType ) { case STRING : methodBuilder . addParameter ( ParameterSpec . builder ( className ( String . class ) , "input" ) . build ( ) ) ; break ; case BYTE : methodBuilder . addParameter ( ParameterSpec . builder ( TypeUtility . arrayTypeName ( Byte . TYPE ) , "input" ) . build ( ) ) ; break ; } methodBuilder . beginControlFlow ( "if (input==null)" ) ; methodBuilder . addStatement ( "return null" ) ; methodBuilder . endControlFlow ( ) ; methodBuilder . addStatement ( "$T context=$T.jsonBind()" , KriptonJsonContext . class , KriptonBinder . class ) ; methodBuilder . beginControlFlow ( "try ($T wrapper=context.createParser(input))" , JacksonWrapperParser . class ) ; methodBuilder . addStatement ( "$T jacksonParser=wrapper.jacksonParser" , JsonParser . class ) ; methodBuilder . addCode ( "// START_OBJECT\n" ) ; methodBuilder . addStatement ( "jacksonParser.nextToken()" ) ; methodBuilder . addCode ( "// value of \"element\"\n" ) ; methodBuilder . addStatement ( "jacksonParser.nextValue()" ) ; String parserName = "jacksonParser" ; BindTransform bindTransform = BindTransformer . lookup ( parameterTypeName ) ; methodBuilder . addStatement ( "$T result=null" , parameterTypeName ) ; BindProperty property = BindProperty . builder ( parameterTypeName ) . inCollection ( false ) . elementName ( DEFAULT_FIELD_NAME ) . build ( ) ; bindTransform . generateParseOnJackson ( context , methodBuilder , parserName , null , "result" , property ) ; methodBuilder . addStatement ( "return result" ) ; methodBuilder . nextControlFlow ( "catch($T e)" , Exception . class ) ; methodBuilder . addStatement ( "throw(new $T(e.getMessage()))" , KriptonRuntimeException . class ) ; methodBuilder . 
endControlFlow ( ) ; // typeBuilder . context . builder . addMethod ( methodBuilder . build ( ) ) ;
public class TypedArrayCompat { /** * Retrieve the resource identifier for the attribute at * < var > index < / var > . Note that attribute resource as resolved when * the overall { @ link TypedArray } object is retrieved . As a result , this function will return * the resource identifier of the final resource value that was found , < em > not < / em > necessarily * the original resource that was specified by the attribute . * @ param index Index of attribute to retrieve . * @ param def Value to return if the attribute is not defined or not a resource . * @ return Attribute resource identifier , or defValue if not defined . */ public static int getResourceId ( Resources . Theme theme , TypedArray a , TypedValue [ ] values , int index , int def ) { } }
if ( values != null && theme != null ) { TypedValue v = values [ index ] ; if ( v . type == TypedValue . TYPE_ATTRIBUTE ) { TEMP_ARRAY [ 0 ] = v . data ; TypedArray tmp = theme . obtainStyledAttributes ( null , TEMP_ARRAY , 0 , 0 ) ; try { return tmp . getResourceId ( 0 , def ) ; } finally { tmp . recycle ( ) ; } } } if ( a != null ) { return a . getResourceId ( index , def ) ; } return def ;
public class SchedulerForType { /** * Match requests to nodes . * @ return The list of granted resources for each session */ private Map < String , List < ResourceGrant > > scheduleTasks ( ) { } }
fullyScheduled = false ; long nodeWait = configManager . getLocalityWait ( type , LocalityLevel . NODE ) ; long rackWait = configManager . getLocalityWait ( type , LocalityLevel . RACK ) ; int tasksToSchedule = configManager . getGrantsPerIteration ( ) ; Map < String , List < ResourceGrant > > sessionIdToGranted = new HashMap < String , List < ResourceGrant > > ( ) ; for ( int i = 0 ; i < tasksToSchedule ; i ++ ) { ScheduledPair scheduled = scheduleOneTask ( nodeWait , rackWait ) ; if ( scheduled == null ) { // Cannot find matched request - node anymore . We are done . fullyScheduled = true ; break ; } List < ResourceGrant > granted = sessionIdToGranted . get ( scheduled . sessionId . toString ( ) ) ; if ( granted == null ) { granted = new LinkedList < ResourceGrant > ( ) ; sessionIdToGranted . put ( scheduled . sessionId . toString ( ) , granted ) ; } granted . add ( scheduled . grant ) ; } return sessionIdToGranted ;
public class QuerySnippets { /** * [ VARIABLE " my _ kind " ] */ public QueryResults < Key > newKeyQuery ( String kind ) { } }
// [ START newKeyQuery ] Query < Key > query = Query . newKeyQueryBuilder ( ) . setKind ( kind ) . build ( ) ; QueryResults < Key > results = datastore . run ( query ) ; // Use results // [ END newKeyQuery ] return results ;
public class Histogram { /** * add item to histogram , increasing its frequency by one */ public int add ( T item ) { } }
Integer count = map . get ( item ) ; if ( count == null ) { map . put ( item , 1 ) ; return 1 ; } else { map . put ( item , count + 1 ) ; return count + 1 ; }
public class Ifc4PackageImpl { /** * < ! - - begin - user - doc - - > * < ! - - end - user - doc - - > * @ generated */ @ Override public EEnum getIfcVibrationIsolatorTypeEnum ( ) { } }
if ( ifcVibrationIsolatorTypeEnumEEnum == null ) { ifcVibrationIsolatorTypeEnumEEnum = ( EEnum ) EPackage . Registry . INSTANCE . getEPackage ( Ifc4Package . eNS_URI ) . getEClassifiers ( ) . get ( 1098 ) ; } return ifcVibrationIsolatorTypeEnumEEnum ;
public class AppAuthenticator { /** * jh the application auth ticket using the refresh token */ public void refreshAppAuthTicket ( ) { } }
StringBuilder resourceUrl = new StringBuilder ( MozuConfig . getBaseUrl ( ) ) . append ( AuthTicketUrl . refreshAppAuthTicketUrl ( null ) . getUrl ( ) ) ; try { @ SuppressWarnings ( "unchecked" ) MozuClient < AuthTicket > client = ( MozuClient < AuthTicket > ) MozuClientFactory . getInstance ( AuthTicket . class ) ; AuthTicketRequest authTicketRequest = new AuthTicketRequest ( ) ; authTicketRequest . setRefreshToken ( appAuthTicket . getRefreshToken ( ) ) ; appAuthTicket = client . executePutRequest ( authTicketRequest , resourceUrl . toString ( ) , null ) ; } catch ( ApiException e ) { logger . warn ( e . getMessage ( ) , e ) ; throw e ; } catch ( Exception e ) { logger . warn ( e . getMessage ( ) , e ) ; throw new ApiException ( "Exception getting Mozu client: " + e . getMessage ( ) ) ; } logger . info ( "Setting app token refresh intervals" ) ; setRefreshIntervals ( false ) ; logger . info ( "App Authentication Done" ) ;
public class DNAToRNATranslator { /** * Takes in the given DNA Sequence and returns an instance of RNASequence * which is using { @ link RnaSequenceView } as a * { @ link ProxySequenceReader } . */ protected RNASequence wrapToRna ( Sequence < NucleotideCompound > dna ) { } }
ProxySequenceReader < NucleotideCompound > rnaView = new RnaSequenceView ( dna ) ; return new RNASequence ( rnaView ) ;
public class PeriodDuration { /** * Obtains an instance based on a period and duration . * The total amount of time of the resulting instance is the period plus the duration . * @ param period the period , not null * @ param duration the duration , not null * @ return the combined period - duration , not null */ public static PeriodDuration of ( Period period , Duration duration ) { } }
Objects . requireNonNull ( period , "The period must not be null" ) ; Objects . requireNonNull ( duration , "The duration must not be null" ) ; return new PeriodDuration ( period , duration ) ;
public class ScriptUtil { /** * Execute a process directly with some arguments * @ param logger logger * @ param workingdir working dir * @ param scriptargs arguments to the shell * @ param envContext Environment variable context * @ param newDataContext context data to replace in the scriptargs * @ param logName name of plugin to use in logging */ static Process execProcess ( final ExecutionListener logger , final File workingdir , final String scriptargs , final Map < String , Map < String , String > > envContext , final Map < String , Map < String , String > > newDataContext , final String logName ) throws IOException { } }
// use script - exec attribute and replace datareferences final String [ ] args = DataContextUtils . replaceDataReferencesInArray ( scriptargs . split ( " " ) , newDataContext ) ; // create system environment variables from the data context final Map < String , String > envMap = DataContextUtils . generateEnvVarsFromContext ( envContext ) ; final ArrayList < String > envlist = new ArrayList < String > ( ) ; for ( final String key : envMap . keySet ( ) ) { final String envval = envMap . get ( key ) ; envlist . add ( key + "=" + envval ) ; } final String [ ] envarr = envlist . toArray ( new String [ envlist . size ( ) ] ) ; logger . log ( 3 , "[" + logName + "] executing: " + StringArrayUtil . asString ( args , " " ) ) ; final Runtime runtime = Runtime . getRuntime ( ) ; return runtime . exec ( args , envarr , workingdir ) ;
public class Es6RewriteBlockScopedDeclaration { /** * Renames block - scoped declarations that shadow a variable in an outer scope * < p > Also normalizes declarations with no initializer in a loop to be initialized to undefined . */ private void visitBlockScopedName ( NodeTraversal t , Node decl , Node nameNode ) { } }
Scope scope = t . getScope ( ) ; Node parent = decl . getParent ( ) ; // Normalize " let x ; " to " let x = undefined ; " if in a loop , since we later convert x // to be $ jscomp $ loop $ 0 . x and want to reset the property to undefined every loop iteration . if ( ( decl . isLet ( ) || decl . isConst ( ) ) && ! nameNode . hasChildren ( ) && ( parent == null || ! parent . isForIn ( ) ) && inLoop ( decl ) ) { Node undefined = createUndefinedNode ( ) . srcref ( nameNode ) ; nameNode . addChildToFront ( undefined ) ; compiler . reportChangeToEnclosingScope ( undefined ) ; } String oldName = nameNode . getString ( ) ; Scope hoistScope = scope . getClosestHoistScope ( ) ; if ( scope != hoistScope ) { String newName = oldName ; if ( hoistScope . hasSlot ( oldName ) || undeclaredNames . contains ( oldName ) ) { do { newName = oldName + "$" + compiler . getUniqueNameIdSupplier ( ) . get ( ) ; } while ( hoistScope . hasSlot ( newName ) ) ; nameNode . setString ( newName ) ; compiler . reportChangeToEnclosingScope ( nameNode ) ; Node scopeRoot = scope . getRootNode ( ) ; renameTable . put ( scopeRoot , oldName , newName ) ; } Var oldVar = scope . getVar ( oldName ) ; scope . undeclare ( oldVar ) ; hoistScope . declare ( newName , nameNode , oldVar . input ) ; }
public class FilePolicyIndex { /** * Determine name of policy from file . . xml prefix is removed , and name is converted to a PID . * Policy names must be valid PIDs . * @ param policyFile * @ return * @ throws PolicyIndexException */ private String fileToName ( File policyFile ) throws PolicyIndexException { } }
try { if ( ! policyFile . getName ( ) . endsWith ( ".xml" ) ) throw new PolicyIndexException ( "Invalid policy file name. Policy files must end in .xml - " + policyFile . getName ( ) ) ; return PID . fromFilename ( policyFile . getName ( ) . substring ( 0 , policyFile . getName ( ) . lastIndexOf ( ".xml" ) ) ) . toString ( ) ; } catch ( MalformedPIDException e ) { throw new PolicyIndexException ( "Invalid policy file name. Filename cannot be converted to a valid PID - " + policyFile . getName ( ) ) ; }
public class Gamma { /** * Regularized Incomplete Gamma Function P ( a , x ) = < i > < big > & # 8747 ; < / big > < sub > < small > 0 < / small > < / sub > < sup > < small > x < / small > < / sup > e < sup > - t < / sup > t < sup > ( a - 1 ) < / sup > dt < / i > . * Series representation of the function - valid for x < a + 1 */ private static double regularizedIncompleteGammaSeries ( double a , double x ) { } }
if ( a < 0.0 || x < 0.0 || x >= a + 1 ) { throw new IllegalArgumentException ( String . format ( "Invalid a = %f, x = %f" , a , x ) ) ; } int i = 0 ; double igf = 0.0 ; boolean check = true ; double acopy = a ; double sum = 1.0 / a ; double incr = sum ; double loggamma = lgamma ( a ) ; while ( check ) { ++ i ; ++ a ; incr *= x / a ; sum += incr ; if ( Math . abs ( incr ) < Math . abs ( sum ) * INCOMPLETE_GAMMA_EPSILON ) { igf = sum * Math . exp ( - x + acopy * Math . log ( x ) - loggamma ) ; check = false ; } if ( i >= INCOMPLETE_GAMMA_MAX_ITERATIONS ) { check = false ; igf = sum * Math . exp ( - x + acopy * Math . log ( x ) - loggamma ) ; logger . error ( "Gamma.regularizedIncompleteGammaSeries: Maximum number of iterations wes exceeded" ) ; } } return igf ;
public class ViewHelper { public static void setBackgroundColor ( int color , final View view ) { } }
if ( color == MULTICOLOR ) { ShapeDrawable . ShaderFactory sf = new ShapeDrawable . ShaderFactory ( ) { @ Override public Shader resize ( int width , int height ) { return new LinearGradient ( 0 , 0 , 0 , height , new int [ ] { 0xFFFF0000 , 0xFF0000FF , 0xFF00FF00 } , new float [ ] { 0.1f , 0.5f , 0.9f } , Shader . TileMode . REPEAT ) ; } } ; PaintDrawable paintDrawable = new PaintDrawable ( ) ; paintDrawable . setShape ( new RectShape ( ) ) ; paintDrawable . setShaderFactory ( sf ) ; view . setBackgroundDrawable ( paintDrawable ) ; } else if ( color == BLACK_WHITE ) { ShapeDrawable . ShaderFactory sf = new ShapeDrawable . ShaderFactory ( ) { @ Override public Shader resize ( int width , int height ) { return new LinearGradient ( 0 , 0 , 0 , height , new int [ ] { 0xFFFFFFFF , 0xFF000000 } , new float [ ] { 0f , 1f } , Shader . TileMode . REPEAT ) ; } } ; PaintDrawable paintDrawable = new PaintDrawable ( ) ; paintDrawable . setShape ( new RectShape ( ) ) ; paintDrawable . setShaderFactory ( sf ) ; view . setBackgroundDrawable ( paintDrawable ) ; } else { view . setBackgroundColor ( color ) ; }
public class CmsJspTagHeadIncludes { /** * Gets the head includes of a resource from the content definition . < p > * @ param cms the current CMS context * @ param res the resource for which the head includes should be fetched * @ param type the head include type ( CSS or Javascript ) * @ return the set of schema head includes * @ throws CmsLoaderException if something goes wrong */ private Set < String > getSchemaHeadIncludes ( CmsObject cms , CmsResource res , String type ) throws CmsLoaderException { } }
if ( type . equals ( TYPE_CSS ) ) { return getCSSHeadIncludes ( cms , res ) ; } else if ( type . equals ( TYPE_JAVASCRIPT ) ) { return getJSHeadIncludes ( cms , res ) ; } return null ;
public class BuildingPartType { /** * Gets the value of the genericApplicationPropertyOfBuildingPart property . * This accessor method returns a reference to the live list , * not a snapshot . Therefore any modification you make to the * returned list will be present inside the JAXB object . * This is why there is not a < CODE > set < / CODE > method for the genericApplicationPropertyOfBuildingPart property . * For example , to add a new item , do as follows : * < pre > * get _ GenericApplicationPropertyOfBuildingPart ( ) . add ( newItem ) ; * < / pre > * Objects of the following type ( s ) are allowed in the list * { @ link JAXBElement } { @ code < } { @ link Object } { @ code > } * { @ link JAXBElement } { @ code < } { @ link Object } { @ code > } */ public List < JAXBElement < Object > > get_GenericApplicationPropertyOfBuildingPart ( ) { } }
if ( _GenericApplicationPropertyOfBuildingPart == null ) { _GenericApplicationPropertyOfBuildingPart = new ArrayList < JAXBElement < Object > > ( ) ; } return this . _GenericApplicationPropertyOfBuildingPart ;
public class BeagleMain { /** * { @ inheritDoc } */ public SemanticSpace getSpace ( ) { } }
SemanticType type = ( argOptions . hasOption ( 's' ) ) ? SemanticType . valueOf ( argOptions . getStringOption ( 's' ) . toUpperCase ( ) ) : SemanticType . COMPOSITE ; return new Beagle ( dimension , type , generatorMap ) ;
public class PassThruTable { /** * Free this passthrutable and all linked tables in this chain . */ public void free ( ) { } }
if ( m_mapTable != null ) { Iterator < BaseTable > iterator = this . getTables ( ) ; while ( iterator . hasNext ( ) ) { BaseTable table = iterator . next ( ) ; if ( ( table != null ) && ( table != this . getNextTable ( ) ) ) { Record record = table . getRecord ( ) ; if ( record != null ) record . free ( ) ; } } m_mapTable . clear ( ) ; m_mapTable = null ; } if ( m_tableNext != null ) { BaseTable baseTable = m_tableNext ; m_tableNext = null ; // This will prevent the record from being freed ( freed in prev . line ) baseTable . free ( ) ; // This will also free the record m_record = null ; // Being paranoid } super . free ( ) ;
public class IcuSyntaxUtils { /** * Given a list of msg parts : ( a ) if it contains any plural / select parts , then builds a new list * of msg parts where plural / select parts in the original msg parts are all embedded as raw text * in ICU format , ( b ) if it doesn ' t contain any plural / select parts , then simply returns the * original msg parts instead of creating a new list of identical msg parts . * @ param origMsgParts The msg parts to convert . * @ return A new list of msg parts with embedded ICU syntax if the original msg parts contain * plural / select parts , otherwise the original msg parts . */ public static ImmutableList < SoyMsgPart > convertMsgPartsToEmbeddedIcuSyntax ( ImmutableList < SoyMsgPart > origMsgParts ) { } }
// If origMsgParts doesn ' t have plural / select parts , simply return it . if ( ! MsgPartUtils . hasPlrselPart ( origMsgParts ) ) { return origMsgParts ; } // Build the new msg parts . ImmutableList . Builder < SoyMsgPart > newMsgPartsBuilder = ImmutableList . builder ( ) ; StringBuilder currRawTextSb = new StringBuilder ( ) ; convertMsgPartsHelper ( newMsgPartsBuilder , currRawTextSb , origMsgParts , /* isInPlrselPart = */ false ) ; if ( currRawTextSb . length ( ) > 0 ) { newMsgPartsBuilder . add ( SoyMsgRawTextPart . of ( currRawTextSb . toString ( ) ) ) ; } return newMsgPartsBuilder . build ( ) ;
public class PTBConstituent { /** * setter for adv - sets Adverbials are generally VP adjuncts . * @ generated * @ param v value to set into the feature */ public void setAdv ( String v ) { } }
if ( PTBConstituent_Type . featOkTst && ( ( PTBConstituent_Type ) jcasType ) . casFeat_adv == null ) jcasType . jcas . throwFeatMissing ( "adv" , "de.julielab.jules.types.PTBConstituent" ) ; jcasType . ll_cas . ll_setStringValue ( addr , ( ( PTBConstituent_Type ) jcasType ) . casFeatCode_adv , v ) ;
public class GetMetricStatisticsRequest { /** * The metric statistics , other than percentile . For percentile statistics , use < code > ExtendedStatistics < / code > . * When calling < code > GetMetricStatistics < / code > , you must specify either < code > Statistics < / code > or * < code > ExtendedStatistics < / code > , but not both . * < b > NOTE : < / b > This method appends the values to the existing list ( if any ) . Use * { @ link # setStatistics ( java . util . Collection ) } or { @ link # withStatistics ( java . util . Collection ) } if you want to * override the existing values . * @ param statistics * The metric statistics , other than percentile . For percentile statistics , use * < code > ExtendedStatistics < / code > . When calling < code > GetMetricStatistics < / code > , you must specify either * < code > Statistics < / code > or < code > ExtendedStatistics < / code > , but not both . * @ return Returns a reference to this object so that method calls can be chained together . * @ see Statistic */ public GetMetricStatisticsRequest withStatistics ( String ... statistics ) { } }
if ( this . statistics == null ) { setStatistics ( new com . amazonaws . internal . SdkInternalList < String > ( statistics . length ) ) ; } for ( String ele : statistics ) { this . statistics . add ( ele ) ; } return this ;
public class RethinkDBSchemaManager { /** * ( non - Javadoc ) * @ see * com . impetus . kundera . configure . schema . api . AbstractSchemaManager # create * ( java . util . List ) */ @ Override protected void create ( List < TableInfo > tableInfos ) { } }
List dbList = r . dbList ( ) . run ( connection ) ; if ( ! dbList . contains ( databaseName ) ) { r . dbCreate ( databaseName ) . run ( connection ) ; } List listTables = r . db ( databaseName ) . tableList ( ) . run ( connection ) ; for ( TableInfo tableInfo : tableInfos ) { try { if ( listTables . contains ( tableInfo . getTableName ( ) ) ) { r . db ( databaseName ) . tableDrop ( tableInfo . getTableName ( ) ) . run ( connection ) ; } } catch ( Exception e ) { logger . error ( "Cannot check table existence for table " + tableInfo . getTableName ( ) + ". Caused By: " + e ) ; throw new KunderaException ( "Cannot check table existence for table " + tableInfo . getTableName ( ) + ". Caused By: " + e ) ; } r . db ( databaseName ) . tableCreate ( tableInfo . getTableName ( ) ) . run ( connection ) ; }
public class GraphQL { /** * Executes the specified graphql query / mutation / subscription * @ param query the query / mutation / subscription * @ return an { @ link ExecutionResult } which can include errors */ public ExecutionResult execute ( String query ) { } }
ExecutionInput executionInput = ExecutionInput . newExecutionInput ( ) . query ( query ) . build ( ) ; return execute ( executionInput ) ;
public class AmqpChannel { /** * Fired on consume basic * @ param e */ private void fireOnConsumeBasic ( ChannelEvent e ) { } }
List < EventListener > listeners = changes . getListenerList ( AMQP ) ; for ( EventListener listener : listeners ) { ChannelListener amqpListener = ( ChannelListener ) listener ; amqpListener . onConsumeBasic ( e ) ; }
public class FLACEncoder {

    /**
     * Close the current FLAC stream. Updates the stream header information.
     * If called on a closed stream, operation is undefined. Do not do this.
     *
     * @throws IOException if an I/O error occurs while finalizing the stream
     * @throws IllegalStateException if no FLAC stream is currently open
     */
    private void closeFLACStream() throws IOException {
        // Surface any error raised by worker threads before finalizing the stream.
        checkForThreadErrors();
        if (DEBUG_LEV > 0)
            System.err.println("FLACEncoder::closeFLACStream : Begin");
        // Serialize close against concurrent stream operations; the finally block
        // guarantees the lock is released even if closing throws.
        streamLock.lock();
        try {
            if (!flacStreamIsOpen)
                throw new IllegalStateException("Cannot close a non-opened stream");
            // Finalize the running MD5 of the audio data and hand it to the writer,
            // which rewrites the stream header (hence "updates the stream header").
            byte[] md5Hash = md5.getMD().digest();
            flacWriter.closeFLACStream(md5Hash, streamConfig);
            flacStreamIsOpen = false;
        } finally {
            streamLock.unlock();
        }
    }
}
public class ExampleStreamingLogWriterPlugin {

    /**
     * Add a new event: formats the event as a string and writes it to the output stream.
     *
     * @param event the log event to write
     */
    public void addEvent(LogEvent event) {
        try {
            write(getString(event));
        } catch (IOException e) {
            // NOTE(review): write failures are deliberately best-effort here — the
            // exception is only printed, not propagated, so a failed write does not
            // interrupt logging. Consider routing this through a proper logger.
            e.printStackTrace();
        }
    }
}
public class OidcClientUtil { /** * extra handling */ public String getRedirectUrl ( HttpServletRequest req , String uri ) { } }
String hostName = req . getServerName ( ) ; Integer httpsPort = new com . ibm . ws . security . common . web . WebUtils ( ) . getRedirectPortFromRequest ( req ) ; String entryPoint = uri ; if ( httpsPort == null && req . isSecure ( ) ) { // TODO : need to specify SSL _ PORT _ IS _ NULL message // Tr . error ( tc , " SSL _ PORT _ IS _ NULL " ) ; int port = req . getServerPort ( ) ; // return whatever in the req String httpSchema = ( ( javax . servlet . ServletRequest ) req ) . getScheme ( ) ; return httpSchema + "://" + hostName + ( port > 0 && port != 443 ? ":" + port : "" ) + entryPoint ; } else { return "https://" + hostName + ( httpsPort == null ? "" : ":" + httpsPort ) + entryPoint ; }
public class JfxNativeWorker { /** * Sometimes we need to work with some bundler , even if it wasn ' t requested . This happens when one bundler was selected and we need * to work with the outcome of some image - bundler ( because that JDK - bundler is faulty ) . */ private boolean shouldBundlerRun ( String requestedBundler , String currentRunningBundlerID , JavaFXGradlePluginExtension ext , final Logger logger , Map < String , ? super Object > params ) { } }
if ( requestedBundler != null && ! "ALL" . equalsIgnoreCase ( requestedBundler ) && ! requestedBundler . equalsIgnoreCase ( currentRunningBundlerID ) ) { // this is not the specified bundler return false ; } if ( ext . isSkipJNLP ( ) && "jnlp" . equalsIgnoreCase ( currentRunningBundlerID ) ) { logger . info ( "Skipped JNLP-bundling as requested." ) ; return false ; } boolean runBundler = true ; // Workaround for native installer bundle not creating working executable native launcher // ( this is a comeback of issue 124) // https : / / github . com / javafx - maven - plugin / javafx - maven - plugin / issues / 205 // do run application bundler and put the cfg - file to application resources if ( System . getProperty ( "os.name" ) . toLowerCase ( ) . startsWith ( "linux" ) ) { if ( workarounds . isWorkaroundForBug205Needed ( ) ) { // check if special conditions for this are met ( not jnlp , but not linux . app too , because another workaround already works ) if ( ! "jnlp" . equalsIgnoreCase ( requestedBundler ) && ! "linux.app" . equalsIgnoreCase ( requestedBundler ) && "linux.app" . equalsIgnoreCase ( currentRunningBundlerID ) ) { if ( ! ext . isSkipNativeLauncherWorkaround205 ( ) ) { logger . info ( "Detected linux application bundler ('linux.app') needs to run before installer bundlers are executed." ) ; runBundler = true ; params . put ( CFG_WORKAROUND_MARKER , "true" ) ; } else { logger . info ( "Skipped workaround for native linux installer bundlers." ) ; } } } } return runBundler ;
public class ScreenUtil { /** * Set this property to this color . * ( Utility method ) . * @ param strProperty The key to save this color as . * @ param color The registered color for this property key . */ public static void setColor ( String strProperty , ColorUIResource color , PropertyOwner propertyOwner , Map < String , Object > properties ) { } }
if ( color != null ) ScreenUtil . setProperty ( strProperty , "#" + Integer . toHexString ( color . getRGB ( ) & 0xFFFFFF ) , propertyOwner , properties ) ; else ScreenUtil . setProperty ( strProperty , null , propertyOwner , properties ) ;
public class NBTIO { /** * Writes the given root CompoundTag to the given file . * @ param tag Tag to write . * @ param file File to write to . * @ param compressed Whether the NBT file should be compressed . * @ param littleEndian Whether to write little endian NBT . * @ throws java . io . IOException If an I / O error occurs . */ public static void writeFile ( CompoundTag tag , File file , boolean compressed , boolean littleEndian ) throws IOException { } }
if ( ! file . exists ( ) ) { if ( file . getParentFile ( ) != null && ! file . getParentFile ( ) . exists ( ) ) { file . getParentFile ( ) . mkdirs ( ) ; } file . createNewFile ( ) ; } OutputStream out = new FileOutputStream ( file ) ; if ( compressed ) { out = new GZIPOutputStream ( out ) ; } writeTag ( out , tag , littleEndian ) ; out . close ( ) ;
public class ProClass {

    /**
     * Construct a new instance of {@code className} using {@code parameters}. If the class cannot
     * be loaded {@code loadFailure} will be invoked and {@code null} is returned. If the class
     * cannot be constructed {@link VoltDB#crashLocalVoltDB(String, boolean, Throwable)} will be
     * invoked with the instantiation exception.
     *
     * @param className The class name of the PRO class
     * @param feature The name of the feature
     * @param loadFailure {@link ErrorHandler} for handling when class loading fails
     * @param parameters to pass to the constructor of {@code className}
     * @return an instance of {@code T}
     */
    public static <T> T newInstanceOf(String className, String feature, ErrorHandler loadFailure,
            Object... parameters) {
        // Load the PRO class (delegating load failures to loadFailure), then
        // construct an instance with the supplied constructor parameters.
        return ProClass.<T> load(className, feature, loadFailure).newInstance(parameters);
    }
}
public class LancasterStemmer {

    /**
     * Strips suffixes off a word by repeatedly applying rules from the Lancaster
     * (Paice/Husk) rule table until no applicable rule remains or a terminating
     * rule fires. Returns the stemmed word.
     *
     * @param word the word to stem (lower-cased and cleaned internally)
     * @return the stemmed form of the word
     */
    private String stripSuffixes(String word) {
        // Tri-state flags: 1 = positive, 0 = undecided, -1 = negative.
        int ruleok = 0;     // whether the current rule matches/applies
        int Continue = 0;   // whether to keep looking for rules (-1 terminates)
        int pll = 0;        // position of the last letter of the current stem
        int xl;             // counter for replacement chars / length of stem if rule applied
        int pfv;            // position of the first vowel
        int prt;            // pointer into the rule table
        int ir;             // index into the current rule
        int iw;             // index into the word
        char ll;            // last letter of the current stem
        String rule = "";   // the rule currently being tried
        String stem = "";   // the word as it is being stemmed; returned at the end
        // True until a rule has modified the word; some rules ('*') require an intact word.
        boolean intact = true;

        // Normalize the input and start stemming from it.
        stem = cleanup(word.toLowerCase());

        // Find the position of the last letter before any non-letter character.
        pll = 0;
        while ((pll + 1 < stem.length()) && ((stem.charAt(pll + 1) >= 'a') && (stem.charAt(pll + 1) <= 'z'))) {
            pll++;
        }
        // Words of a single letter (or less) cannot be stemmed.
        if (pll < 1) {
            Continue = -1;
        }
        // Locate the first vowel; rules must always leave it in the stem.
        pfv = firstVowel(stem, pll);
        iw = stem.length() - 1;

        // Repeat until no further rule can be applied (Continue == -1).
        while (Continue != -1) {
            Continue = 0;
            // SEEK RULE FOR A NEW FINAL LETTER.
            ll = stem.charAt(pll); // last letter
            // Rules are indexed by the stem's final letter; non-letters have no rules.
            if ((ll >= 'a') && (ll <= 'z')) {
                prt = index[charCode(ll)]; // pointer into rule table
            } else {
                prt = -1; // 0 is a valid rule index, so -1 marks "none"
            }
            if (prt == -1) {
                Continue = -1; // no rule available
            }
            if (Continue == 0) {
                // THERE IS A POSSIBLE RULE (OR RULES): SEE IF ONE WORKS.
                rule = rules.get(prt); // take the first candidate rule
                while (Continue == 0) {
                    ruleok = 0;
                    // The rule's first character must match the stem's final letter.
                    if (rule.charAt(0) != ll) {
                        Continue = -1;
                        ruleok = -1;
                    }
                    ir = 1;        // index of rule: 2nd character
                    iw = pll - 1;  // index of word: next-to-last letter
                    // Compare rule letters against the word tail until accepted or rejected.
                    while (ruleok == 0) {
                        if ((rule.charAt(ir) >= '0') && (rule.charAt(ir) <= '9')) {
                            // A digit terminates the match portion: rule fully matched.
                            ruleok = 1;
                        } else if (rule.charAt(ir) == '*') {
                            // '*' matches only while the word is still intact (unstemmed).
                            if (intact) {
                                ir = ir + 1; // move forwards along the rule
                                ruleok = 1;
                            } else {
                                ruleok = -1;
                            }
                        } else if (rule.charAt(ir) != stem.charAt(iw)) {
                            // Mismatch of letters.
                            ruleok = -1;
                        } else if (iw <= pfv) {
                            // Insufficient stem would remain (would consume the first vowel).
                            ruleok = -1;
                        } else {
                            // Letters match: compare the next pair.
                            ir = ir + 1; // forwards along rule
                            iw = iw - 1; // backwards along word
                        }
                    }
                    // The rule matched; now check the acceptability condition.
                    if (ruleok == 1) {
                        // CHECK ACCEPTABILITY CONDITION FOR PROPOSED RULE.
                        xl = 0; // count any replacement letters after the digit
                        while (!((rule.charAt(ir + xl + 1) >= '.') && (rule.charAt(ir + xl + 1) <= '>'))) {
                            xl++;
                        }
                        // Position of the last letter if the rule were applied
                        // (the digit encodes how many letters to remove, offset by '0' == 48).
                        xl = pll + xl + 48 - ((int) (rule.charAt(ir)));
                        if (pfv == 0) {
                            // Word starts with a vowel: minimal stem is 2 letters.
                            if (xl < 1) {
                                ruleok = -1;
                            } else {
                                // ruleok = 1; already positive to reach this stage
                            }
                        }
                        // Word starts with a consonant: minimal stem is 3 letters,
                        // including one or more vowels.
                        else if ((xl < 2) | (xl < pfv)) {
                            ruleok = -1;
                        } else {
                            // ruleok = 1; already positive to reach this stage
                        }
                    }
                    // The rule passed all tests: apply it.
                    if (ruleok == 1) {
                        // APPLY THE MATCHING RULE.
                        intact = false;
                        // Move the end-of-word marker back by the count given by the digit.
                        pll = pll + 48 - ((int) (rule.charAt(ir)));
                        ir++;
                        stem = stem.substring(0, (pll + 1));
                        // Append any replacement letters following the digit.
                        while ((ir < rule.length()) && (('a' <= rule.charAt(ir)) && (rule.charAt(ir) <= 'z'))) {
                            stem += rule.charAt(ir);
                            ir++;
                            pll++;
                        }
                        // '.' terminates stemming; '>' continues with the new final letter.
                        if ((rule.charAt(ir)) == '.') {
                            Continue = -1;
                        } else {
                            Continue = 1;
                        }
                    } else {
                        // Rule did not apply: try the next rule in the table.
                        prt = prt + 1;
                        if (prt >= rules.size()) {
                            Continue = -1;
                        } else {
                            rule = rules.get(prt);
                            // Rules are grouped by final letter; a different first
                            // character means no more rules for this letter.
                            if (rule.charAt(0) != ll) {
                                Continue = -1;
                            }
                        }
                    }
                }
            }
        }
        return stem;
    }
}
public class VpnSitesInner {

    /**
     * Updates VpnSite tags.
     *
     * @param resourceGroupName The resource group name of the VpnSite.
     * @param vpnSiteName The name of the VpnSite being updated.
     * @param tags Resource tags.
     * @throws IllegalArgumentException thrown if parameters fail the validation
     * @throws ErrorException thrown if the request is rejected by server
     * @throws RuntimeException all other wrapped checked exceptions if the request fails to be sent
     * @return the VpnSiteInner object if successful.
     */
    public VpnSiteInner beginUpdateTags(String resourceGroupName, String vpnSiteName, Map<String, String> tags) {
        // Delegate to the async service call, block until the single response
        // arrives, and unwrap its body.
        return beginUpdateTagsWithServiceResponseAsync(resourceGroupName, vpnSiteName, tags).toBlocking().single().body();
    }
}
public class Main {

    /**
     * Calculates the total number of set bits in the binary representations of all
     * integers from 1 to {@code num}, inclusive.
     *
     * Works bit-position by bit-position: for bit k (block size 2^(k+1) alternating
     * half-off/half-on), count the full "on" blocks within [0, num], plus any partial
     * trailing "on" block. Runs in O(log num).
     *
     * Examples:
     *   calculateSetBits(16) == 33
     *   calculateSetBits(2)  == 2
     *   calculateSetBits(14) == 28
     *
     * @param num upper bound of the range (inclusive)
     * @return total count of set bits over 1..num
     */
    public static int calculateSetBits(int num) {
        // Count over the half-open range [0, upper); 0 contributes no bits.
        int upper = num + 1;
        // Bit 0 is set in every second number.
        int total = upper >> 1;
        // Each higher bit k alternates in blocks of size 2^k (off-block then on-block).
        for (int blockSize = 2; blockSize <= upper; blockSize <<= 1) {
            int blocks = upper / blockSize;
            // Every second block is fully "on", contributing blockSize bits each.
            total += (blocks >> 1) * blockSize;
            // An odd block count means we are partway through an "on" block.
            if ((blocks & 1) == 1) {
                total += upper % blockSize;
            }
        }
        return total;
    }
}
public class PoolManager { /** * This method reserves connection . If unused connection exists , it is returned , * otherwise new connection is created using ManagedConnectionFactory . * @ param Subject connection security context * @ param ConnectionRequestInfo requestInfo * @ param Object affinity * @ param boolean connectionSharing * @ param boolean enforceSerialReuse * @ param int commitPriority * @ return MCWrapper * @ concurrency concurrent * @ throws ResourceException * @ throws ResourceAllocationException */ public MCWrapper reserve ( ManagedConnectionFactory managedConnectionFactory , Subject subject , ConnectionRequestInfo requestInfo , Object affinity , boolean connectionSharing , boolean enforceSerialReuse , int commitPriority , int branchCoupling ) throws javax . resource . ResourceException , ResourceAllocationException { } }
final boolean isTracingEnabled = TraceComponent . isAnyTracingEnabled ( ) ; // boolean normalSharing = true ; // This is to be a parm past in when j2c code is ready // If this is false we don ' t check the for an existing shared // Connection , we hand out a new shared connection . if ( isTracingEnabled && tc . isEntryEnabled ( ) ) Tr . entry ( this , tc , "reserve" ) ; if ( isTracingEnabled && tc . isDebugEnabled ( ) ) { StringBuffer sbuff = new StringBuffer ( 250 ) ; sbuff . append ( "input parms... " ) ; sbuff . append ( nl ) ; sbuff . append ( " subject = " ) ; if ( subject == null ) { sbuff . append ( "null" ) ; } else { // synchronized ( this ) { SubjectToString subjectToString = new SubjectToString ( ) ; subjectToString . setSubject ( subject ) ; sbuff . append ( AccessController . doPrivileged ( subjectToString ) ) ; } sbuff . append ( " affinity = " ) ; sbuff . append ( affinity ) ; sbuff . append ( nl ) ; sbuff . append ( " Shared connection = " ) ; sbuff . append ( connectionSharing ) ; sbuff . append ( nl ) ; sbuff . append ( " Force new MC = " ) ; sbuff . append ( enforceSerialReuse ) ; sbuff . append ( nl ) ; sbuff . append ( " commitPriority = " ) ; sbuff . append ( commitPriority ) ; sbuff . append ( nl ) ; sbuff . append ( " branchCoupling = " ) ; sbuff . append ( branchCoupling ) ; sbuff . append ( nl ) ; sbuff . append ( " Connection Request Information = " ) ; sbuff . append ( requestInfo ) ; Tr . debug ( this , tc , sbuff . toString ( ) ) ; Tr . debug ( this , tc , "reserve(), Pool contents ==> " + this . toString2 ( 1 ) ) ; } /* * Added for holding out connection request while * a free or shared pool is being updated */ requestingAccessToPool ( ) ; // Count the number of managed connection on this thread . // If we find matching thread , // add to a counter and check if we exceeded the max number // allowed set by customer using custom properties maxNumberOfMCsAllowableInThread . Throw // an exception if we exceed the number . 
: - ) if ( maxNumberOfMCsAllowableInThread == 0 ) { // don ' t check the number of connections when using a lightweight server // and maxNumberOfMCsAllowableInThread is 0 ( in the lighweight server 0 means no limit ) } else if ( maxNumberOfMCsAllowableInThread > 0 ) { // if the custom property is set , lets use it . checkForMCsOnThread ( maxNumberOfMCsAllowableInThread ) ; } if ( ( isTracingEnabled && tc . isDebugEnabled ( ) ) ) { ++ totalPoolConnectionRequests ; } com . ibm . ws . j2c . MCWrapper mcWrapper = null ; int sharedbucket = 0 ; /* * Check affinity and connectionSharing . We need to check for a shared connection * first . If a matching one exists , reuse it . */ if ( affinity != null && connectionSharing ) { // Start - - thread local fast path . . . if ( localConnection_ != null ) { ArrayList < MCWrapper > mh = localConnection_ . get ( ) ; if ( mh != null ) { requestingAccessToTLSPool ( ) ; int arraySize = mh . size ( ) ; if ( arraySize > 0 ) { MCWrapper freeLocalConnection = null ; if ( arraySize == 1 ) { // fast path , we only have one . MCWrapper localConnection = mh . get ( 0 ) ; if ( localConnection . getPoolState ( ) == MCWrapper . ConnectionState_freeTLSPool ) { // We have a free connection , save it and use it if we do not have a matching affinity . freeLocalConnection = getMCWrapperFromMatch ( subject , requestInfo , managedConnectionFactory , localConnection ) ; // Need to add code for handling resource exception . } else if ( localConnection . getPoolState ( ) == MCWrapper . ConnectionState_sharedTLSPool ) { if ( localConnection . getSharedPoolCoordinator ( ) != null && localConnection . getSharedPoolCoordinator ( ) . equals ( affinity ) && isBranchCouplingCompatible ( commitPriority , branchCoupling , localConnection ) ) { /* * No call to matchManagedConnection is occurring to check for matching connection . * When this feature is enabled , its assumed user of the feature know what they are doing . 
* I am tempted to add the matching code here , but , not sure of the performance hit . */ // MCWrapper mcwtemp = getMCWrapperFromMatch ( subject , requestInfo , managedConnectionFactory , localConnection ) ; / / Need to add code for handling resource exception . if ( isCRIsMatching ( requestInfo , localConnection ) && isSubjectsMatching ( subject , localConnection ) ) { // we have a matching connection . if ( enforceSerialReuse && ( localConnection . getHandleCount ( ) >= 1 ) ) { logLTCSerialReuseInfo ( affinity , gConfigProps . cfName , localConnection , this ) ; } // end enforceSerialReuse & & ( mcWrapperTemp . getHandleCount ( ) > = 1) else { endingAccessToTLSPool ( ) ; activeRequest . decrementAndGet ( ) ; if ( isTracingEnabled && tc . isEntryEnabled ( ) ) Tr . exit ( this , tc , "reserve" , new Object [ ] { localConnection , localConnection . getManagedConnection ( ) } ) ; return localConnection ; } } } } } else { for ( int i = 0 ; i < arraySize ; ++ i ) { MCWrapper localConnection = mh . get ( i ) ; // check for affinity . check pool state to see if its active . // if the checks are matching , then life is good . if ( localConnection . getPoolState ( ) == MCWrapper . ConnectionState_freeTLSPool ) { // We have a free connection , save it and use it if we do not have a matching affinity . if ( freeLocalConnection == null ) { freeLocalConnection = getMCWrapperFromMatch ( subject , requestInfo , managedConnectionFactory , localConnection ) ; // Need to add code for handling resource exception . } } else if ( localConnection . getPoolState ( ) == MCWrapper . ConnectionState_sharedTLSPool ) { if ( localConnection . getSharedPoolCoordinator ( ) != null && localConnection . getSharedPoolCoordinator ( ) . equals ( affinity ) && isBranchCouplingCompatible ( commitPriority , branchCoupling , localConnection ) ) { /* * No call to matchManagedConnection is occurring to check for matching connection . 
* When this feature is enabled , its assumed user of the feature know what they are doing . * I am tempted to add the matching code here , but , not sure of the performance hit . */ // MCWrapper mcwtemp = getMCWrapperFromMatch ( subject , requestInfo , managedConnectionFactory , localConnection ) ; / / Need to add code for handling resource exception . if ( isCRIsMatching ( requestInfo , localConnection ) && isSubjectsMatching ( subject , localConnection ) ) { // we have a matching connection . if ( enforceSerialReuse && ( localConnection . getHandleCount ( ) >= 1 ) ) { logLTCSerialReuseInfo ( affinity , gConfigProps . cfName , localConnection , this ) ; } // end enforceSerialReuse & & ( mcWrapperTemp . getHandleCount ( ) > = 1) else { endingAccessToTLSPool ( ) ; activeRequest . decrementAndGet ( ) ; if ( isTracingEnabled && tc . isEntryEnabled ( ) ) Tr . exit ( this , tc , "reserve" , new Object [ ] { localConnection , localConnection . getManagedConnection ( ) } ) ; return localConnection ; } } } } } } if ( freeLocalConnection != null ) { freeLocalConnection . setPoolState ( MCWrapper . ConnectionState_sharedTLSPool ) ; freeLocalConnection . setSharedPoolCoordinator ( affinity ) ; freeLocalConnection . markInUse ( ) ; endingAccessToTLSPool ( ) ; activeRequest . decrementAndGet ( ) ; if ( isTracingEnabled && tc . isEntryEnabled ( ) ) Tr . exit ( this , tc , "reserve" , new Object [ ] { freeLocalConnection , freeLocalConnection . getManagedConnection ( ) } ) ; return freeLocalConnection ; } } endingAccessToTLSPool ( ) ; } } // End - - thread local fast path . . . /* * Looking in the shared pool for an existing connection with an affinity */ sharedbucket = Math . abs ( affinity . hashCode ( ) % maxSharedBuckets ) ; // Calculate the buck values for the shared bucket if ( isTracingEnabled && tc . isDebugEnabled ( ) ) { Tr . debug ( this , tc , "Searching for shared connection in partition " + sharedbucket ) ; } mcWrapper = sharedPool [ sharedbucket ] . 
getSharedConnection ( affinity , subject , requestInfo , enforceSerialReuse , gConfigProps . getXpathId ( ) , commitPriority , branchCoupling ) ; // Start - - thread local fast path . . . } else { // The connection is not shared . This is a once only use thread pool . We will look for free // TLS connections , if we find one and move it to inuse , it can not be used again until // returned to the TLS if ( localConnection_ != null ) { ArrayList < MCWrapper > mh = localConnection_ . get ( ) ; if ( mh != null ) { requestingAccessToTLSPool ( ) ; int arraySize = mh . size ( ) ; if ( arraySize > 0 ) { for ( int i = 0 ; i < arraySize ; ++ i ) { MCWrapper localConnection = mh . get ( i ) ; // check for affinity . check pool state to see if its active . // if the checks are matching , then life is good . if ( localConnection . getPoolState ( ) == MCWrapper . ConnectionState_freeTLSPool ) { // We have a free connection , save it and use it if we do not have a matching affinity . MCWrapper mcwtemp = getMCWrapperFromMatch ( subject , requestInfo , managedConnectionFactory , localConnection ) ; // Need to add code for handling resource exception . if ( mcwtemp != null ) { // we have a matching connection . localConnection . setPoolState ( MCWrapper . ConnectionState_unsharedTLSPool ) ; localConnection . markInUse ( ) ; endingAccessToTLSPool ( ) ; activeRequest . decrementAndGet ( ) ; if ( isTracingEnabled && tc . isEntryEnabled ( ) ) Tr . exit ( this , tc , "reserve" , new Object [ ] { localConnection , localConnection . getManagedConnection ( ) } ) ; return localConnection ; } } } } endingAccessToTLSPool ( ) ; } } // End - - thread local fast path . . . } /* * If mcWrapper is null , we did not find a shared connection . We need to look for an existing * connection in the free pool . */ if ( mcWrapper == null ) { /* * Test connection code added */ if ( ! allowConnectionRequests ) { /* * Need to throw an exception . 
It is OK to do an un - synchronized read of * the allowConnectionRequests . */ ResourceAllocationException throwMe = null ; if ( connectionPoolShutDown ) { Object [ ] parms = new Object [ ] { "reserve" , "Pool requests blocked, connection pool is being shut down." , "ResourceAllocationException" , gConfigProps . cfName } ; Tr . error ( tc , "POOL_MANAGER_EXCP_CCF2_0002_J2CA0046" , parms ) ; throwMe = new ResourceAllocationException ( "Pool requests blocked for " + gConfigProps . getXpathId ( ) + ", connection pool is being shut down." ) ; } else { Object [ ] parms = new Object [ ] { "reserve" , "Failed preTestConnection. Pool requests blocked until the test connection thread is successful." , "ResourceAllocationException" , gConfigProps . cfName } ; Tr . error ( tc , "POOL_MANAGER_EXCP_CCF2_0002_J2CA0046" , parms ) ; throwMe = new ResourceAllocationException ( "Failed preTestConnection. Pool requests blocked for " + gConfigProps . getXpathId ( ) + " until the test connection thread is successful." ) ; } activeRequest . decrementAndGet ( ) ; if ( isTracingEnabled && tc . isEntryEnabled ( ) ) { Tr . exit ( this , tc , "reserve" , throwMe ) ; } throw ( throwMe ) ; } int hashCode = computeHashCode ( subject , requestInfo ) ; int hashMapBucket = hashCode % maxFreePoolHashSize ; /* * If we have waiters , we don ' t have any free connection , move to the * create or wait code . If we don ' t have any waiters , we need to look for * a free connection . * If connectionPooling is disabled , there is no reason to check the free * pool , go directly to create or wait code . */ if ( waiterCount < 1 && gConfigProps . connectionPoolingEnabled ) { /* * Looking in the free pool for an existing free connection . If one * exist and it matches the Subject and CRI information , we will use it . */ if ( freePool [ hashMapBucket ] . mcWrapperList . size ( ) > 0 ) { mcWrapper = freePool [ hashMapBucket ] . 
getFreeConnection ( managedConnectionFactory , subject , requestInfo , hashCode ) ; } else { if ( isTracingEnabled && tc . isDebugEnabled ( ) ) { ++ freePool [ hashMapBucket ] . fop_get_notfound ; } } /* * If mcWrapper is null , we need to check the free pool for non - matching * connection . If we find a free connection that does not match and the * free pool has reached its maxConnections , we need to remove the free * connection and create a new connection for this request . */ if ( mcWrapper == null ) { /* * If one user id option is used there will be no victim to claim or * if maxConnections is zero , move on to the create or wait code */ if ( maxConnections != 0 ) { boolean tryToClaimVictim = false ; if ( totalConnectionCount . get ( ) >= maxConnections ) { // remove synchronized block and second totalConnectionCount check /* * We only need to try and claim a victim if we have reached * our connection max . */ tryToClaimVictim = true ; } if ( tryToClaimVictim ) { // We need to look for a victim /* * searchHashMapBucket is the first hashMapBucket being * searched . We only need to search through the hash map bucket * since there is only one free pool bucket . */ int searchHashMapBucket = hashMapBucket ; int tempLocalHashMapBucket = hashMapBucket ; for ( int i = 0 ; i < maxFreePoolHashSize ; ++ i ) { /* * This is not double - checked locking . The following code dirty * reads the size of the mcWrapperList that may be change at * any time by another thread . If it is greater than zero , we * need to synchronize and check the value again . If it is * still greater than zero , we have one or more mcWrappers to * work with . In order to work with the mcWrapperList , we need * to be synchronized . */ if ( freePool [ searchHashMapBucket ] . mcWrapperList . size ( ) > 0 ) { try { synchronized ( freePool [ searchHashMapBucket ] . freeConnectionLockObject ) { if ( freePool [ searchHashMapBucket ] . mcWrapperList . 
size ( ) > 0 ) { /* * claimVictim will return a true if in has claimed a * victim , Since we have locked this free pool , we * know a victim will be claimed . */ mcWrapper = claimVictim ( managedConnectionFactory , searchHashMapBucket , subject , requestInfo ) ; } } // end sync , only need the sync for claimVictim . if ( mcWrapper == null ) { mcWrapper = freePool [ hashMapBucket ] . createOrWaitForConnection ( managedConnectionFactory , subject , requestInfo , hashMapBucket , maxFreePoolHashSize , true , connectionSharing , hashCode ) ; } break ; // } / / end sync - moved sync up , since we do not need it for the createorwaitforConnections and some resource adapters have not been returning from createManagedConnection } catch ( ConnectionWaitTimeoutException e ) { /* * All we need to do is throw the * ConnectionWaitTimeoutException exception . */ throw e ; } catch ( ResourceAllocationException e ) { /* * Start of new code for defect The following * notify code was moved from the create or wait code due * to the free pool lock required at the time . * We need to reduce the totalConnectionCount and notify a * waiter if one exists . This will allow the wait a chance * at creating a connection to return to the requester . */ if ( e . getCause ( ) instanceof InterruptedException ) { if ( isTracingEnabled && tc . isDebugEnabled ( ) ) { Tr . debug ( tc , "Thread was interrupted, skipping decrement of total connection count" ) ; } synchronized ( waiterFreePoolLock ) { if ( waiterCount > 0 ) { waiterFreePoolLock . notify ( ) ; } } } else { synchronized ( waiterFreePoolLock ) { int totalCount = this . totalConnectionCount . decrementAndGet ( ) ; if ( isTracingEnabled && tc . isDebugEnabled ( ) ) { Tr . debug ( tc , "Decrement of total connection count " + totalCount ) ; } if ( waiterCount > 0 ) { waiterFreePoolLock . 
notify ( ) ; } } } throw e ; } } /* * move to the next hash bucket to be searched */ searchHashMapBucket = ( ++ tempLocalHashMapBucket ) % maxFreePoolHashSize ; } } } /* * If mcWrapper is null , will create a new connection or wait for a * connection to become available . */ if ( mcWrapper == null ) { try { mcWrapper = freePool [ hashMapBucket ] . createOrWaitForConnection ( managedConnectionFactory , subject , requestInfo , hashMapBucket , maxFreePoolHashSize , false , connectionSharing , hashCode ) ; } catch ( ConnectionWaitTimeoutException e ) { /* * All we need to do is throw the ConnectionWaitTimeoutException * exception . */ throw e ; } catch ( ResourceAllocationException e ) { /* * Start of new code for defect The following notify code * was moved from the create or wait code due to the free pool * lock required at the time . * We need to reduce the totalConnectionCount and notify a waiter * if one exists . This will allow the wait a chance at creating a * connection to return to the requester . */ if ( e . getCause ( ) instanceof InterruptedException ) { if ( isTracingEnabled && tc . isDebugEnabled ( ) ) { Tr . debug ( tc , "Thread was interrupted, skipping decrement of total connection count" ) ; } synchronized ( waiterFreePoolLock ) { if ( waiterCount > 0 ) { waiterFreePoolLock . notify ( ) ; } } } else { synchronized ( waiterFreePoolLock ) { int totalCount = this . totalConnectionCount . decrementAndGet ( ) ; if ( isTracingEnabled && tc . isDebugEnabled ( ) ) { Tr . debug ( tc , "Decrement of total connection count " + totalCount ) ; } if ( waiterCount > 0 ) { waiterFreePoolLock . notify ( ) ; } } } throw e ; } } } } // end if waiterCount > 0 else { if ( ( isTracingEnabled && tc . isDebugEnabled ( ) ) ) { ++ waitSkip ; } /* * if waiterCount is one or greater there are no connection available , * Call the createOrWaitForConnection code */ try { mcWrapper = freePool [ hashMapBucket ] . 
createOrWaitForConnection ( managedConnectionFactory , subject , requestInfo , hashMapBucket , maxFreePoolHashSize , false , connectionSharing , hashCode ) ; } catch ( ConnectionWaitTimeoutException e ) { /* * All we need to do is throw the ConnectionWaitTimeoutException * exception . */ throw e ; } catch ( ResourceAllocationException e ) { /* * Start of new code for defect The following notify code was * moved from the create or wait code due to the free pool lock * required at the time . * We need to reduce the totalConnectionCount and notify a waiter if * one exists . This will allow the wait a chance at creating a * connection to return to the requester . */ if ( e . getCause ( ) instanceof InterruptedException ) { if ( isTracingEnabled && tc . isDebugEnabled ( ) ) { Tr . debug ( tc , "Thread was interrupted, skipping decrement of total connection count" ) ; } synchronized ( waiterFreePoolLock ) { if ( waiterCount > 0 ) { waiterFreePoolLock . notify ( ) ; } } } else { synchronized ( waiterFreePoolLock ) { int totalCount = this . totalConnectionCount . decrementAndGet ( ) ; if ( isTracingEnabled && tc . isDebugEnabled ( ) ) { Tr . debug ( tc , "Decrement of total connection count " + totalCount ) ; } if ( waiterCount > 0 ) { waiterFreePoolLock . notify ( ) ; } } } throw e ; } } // if the mcWrapper is null , the following code will not be executed . ManagedConnection mc = mcWrapper . getManagedConnection ( ) ; if ( ( ( managedConnectionFactory instanceof WSManagedConnectionFactory && ( ( WSManagedConnectionFactory ) managedConnectionFactory ) . isPooledConnectionValidationEnabled ( ) ) || ( ( com . ibm . ejs . j2c . MCWrapper ) mcWrapper ) . isPretestThisConnection ( ) ) && gConfigProps . validatingMCFSupported ) { /* * Reset pretest value */ ( ( com . ibm . ejs . j2c . MCWrapper ) mcWrapper ) . setPretestThisConnection ( false ) ; /* * We need to test the connection before we return the mcWrapper from * the free pool . 
Note : We do not need to test shared connections * because the connection is already being used . If there is a * connection problem , it should fail in the application using the * shared connection . */ int poolState = mcWrapper . getPoolState ( ) ; mcWrapper . setPoolState ( 50 ) ; ValidatingManagedConnectionFactory validatingMCF = ( ( ValidatingManagedConnectionFactory ) managedConnectionFactory ) ; Set < ? > invalid = validatingMCF . getInvalidConnections ( Collections . singleton ( mc ) ) ; if ( invalid . isEmpty ( ) ) mcWrapper . setPoolState ( poolState ) ; else { /* * Before we try to create a new connection , we need to destroy the * connection that failed the preTestConnection */ freePool [ 0 ] . cleanupAndDestroyMCWrapper ( mcWrapper ) ; /* * We are going to try calling the test connection again . */ try { /* * Try to create a new connection . */ mcWrapper = freePool [ 0 ] . createManagedConnectionWithMCWrapper ( managedConnectionFactory , subject , requestInfo , connectionSharing , hashCode ) ; mcWrapper . setHashMapBucket ( hashMapBucket ) ; } catch ( ResourceException re ) { preTestFailed ( managedConnectionFactory , subject , requestInfo , hashMapBucket , re ) ; } mc = mcWrapper . getManagedConnection ( ) ; invalid = validatingMCF . getInvalidConnections ( Collections . singleton ( mc ) ) ; if ( invalid . isEmpty ( ) ) allowConnectionRequests = true ; else preTestFailed ( managedConnectionFactory , subject , requestInfo , hashMapBucket , new ResourceAllocationException ( ) ) ; } } mcWrapper . markInUse ( ) ; if ( gConfigProps . raSupportsReauthentication ) { mcWrapper . setHashMapBucketReAuth ( hashMapBucket ) ; } /* * While the connection is inuse we need continued support of WAS pooling * functions except for pooling connections in the free pool . By * commenting out the following check , we will store inuse connection in * the shared or unshared pools . 
When the connection is close and the * transaction committed , the connection will not be pooled in the free * pool . */ // if ( gConfigProps . connectionPoolingEnabled ) { if ( affinity != null && connectionSharing && ! ( ( com . ibm . ejs . j2c . MCWrapper ) mcWrapper ) . isEnlistmentDisabled ( ) ) { // add to shared pool , 723884 if ( gConfigProps . isConnectionSynchronizationProvider ( ) ) { /* * If we are a SynchronizationProvider and shareable , log a message * and put this connection in the unshareable pool . Connection sharing * is not allowed . */ if ( isTracingEnabled && tc . isDebugEnabled ( ) ) { Tr . debug ( this , tc , "Shareable connections are not allowed with connections that are a SynchronizationProvider. This connection will not be shareable." ) ; } mcWrapper . setPoolState ( 3 ) ; mcWrapper . setInSharedPool ( false ) ; } else { // save on thread local ? if ( isThreadLocalConnectionEnabled && localConnection_ != null ) { ArrayList < MCWrapper > mh = localConnection_ . get ( ) ; requestingAccessToTLSPool ( ) ; if ( mh . size ( ) < maxCapacity ) { mcWrapper . setPoolState ( MCWrapper . ConnectionState_sharedTLSPool ) ; mcWrapper . setSharedPoolCoordinator ( affinity ) ; mh . add ( mcWrapper ) ; if ( TraceComponent . isAnyTracingEnabled ( ) && tc . isDebugEnabled ( ) ) { ( ( com . ibm . ejs . j2c . MCWrapper ) mcWrapper ) . setThreadID ( ( ( com . ibm . ejs . j2c . MCWrapper ) mcWrapper ) . getThreadID ( ) + "-reserve-added" ) ; Tr . debug ( this , tc , "Added mcWrapper from thread local " + mcWrapper ) ; } tlsArrayLists . put ( mcWrapper , mh ) ; } else { sharedPool [ sharedbucket ] . setSharedConnection ( affinity , mcWrapper ) ; mcWrapper . setInSharedPool ( true ) ; } endingAccessToTLSPool ( ) ; // localConnection _ . set ( mh ) ; } else { sharedPool [ sharedbucket ] . setSharedConnection ( affinity , mcWrapper ) ; mcWrapper . setInSharedPool ( true ) ; } // Not creating the parked connection ConnectionManager . parkHandle is not used . 
} } else { // add it to the used pool . if ( isThreadLocalConnectionEnabled && localConnection_ != null ) { ArrayList < MCWrapper > mh = localConnection_ . get ( ) ; requestingAccessToTLSPool ( ) ; if ( mh . size ( ) < maxCapacity ) { mcWrapper . setPoolState ( MCWrapper . ConnectionState_unsharedTLSPool ) ; // mcWrapper . setSharedPoolCoordinator ( affinity ) ; mh . add ( mcWrapper ) ; if ( TraceComponent . isAnyTracingEnabled ( ) && tc . isDebugEnabled ( ) ) { ( ( com . ibm . ejs . j2c . MCWrapper ) mcWrapper ) . setThreadID ( ( ( com . ibm . ejs . j2c . MCWrapper ) mcWrapper ) . getThreadID ( ) + "-reserve-added" ) ; Tr . debug ( this , tc , "Added mcWrapper from thread local " + mcWrapper ) ; } tlsArrayLists . put ( mcWrapper , mh ) ; } else { mcWrapper . setPoolState ( 3 ) ; mcWrapper . setInSharedPool ( false ) ; } endingAccessToTLSPool ( ) ; // localConnection _ . set ( mh ) ; } else { mcWrapper . setPoolState ( MCWrapper . ConnectionState_unsharedPool ) ; mcWrapper . setInSharedPool ( false ) ; } } } /* * Check to see if trace has been turned on for the managed connection . */ if ( traceWriter . isTraceEnabled ( ) ) { /* * If trace has been turned on , check to see if we already set the log * writer . */ if ( ! mcWrapper . isLogWriterSet ( ) ) { /* * Set the log writer on all mc ' s */ turnOnLogWriter ( ) ; } } else { /* * If trace has been turned off , check to see if we already set the log * writer to null . */ if ( mcWrapper . isLogWriterSet ( ) ) { /* * Set a null log writer on the mc */ turnOffLogWriter ( ) ; } } activeRequest . decrementAndGet ( ) ; if ( isTracingEnabled && tc . isDebugEnabled ( ) ) { if ( mcWrapper . getPoolState ( ) == 3 ) { // unshared connections may or may not be in a transaction , but they can be in a scope of a transaction // Save the transaction scope for easier debugging of problems ( ( com . ibm . ejs . j2c . MCWrapper ) mcWrapper ) . setUnSharedPoolCoordinator ( affinity ) ; } } if ( isTracingEnabled && tc . 
isEntryEnabled ( ) ) Tr . exit ( this , tc , "reserve" , new Object [ ] { mcWrapper , mcWrapper . getManagedConnection ( ) } ) ; return mcWrapper ;
public class ServerHandshaker { /** * This routine handles all the server side handshake messages , one at * a time . Given the message type ( and in some cases the pending cipher * spec ) it parses the type - specific message . Then it calls a function * that handles that specific message . * It updates the state machine as each message is processed , and writes * responses as needed using the connection in the constructor . */ void processMessage ( byte type , int message_len ) throws IOException { } }
// Reject out-of-order handshake messages.
// In SSLv3 and TLS, messages follow strictly increasing
// numerical order _except_ for one annoying special case:
// certificate_verify (15) arrives after client_key_exchange (16).
if ((state >= type) && (state != HandshakeMessage.ht_client_key_exchange && type != HandshakeMessage.ht_certificate_verify)) {
    throw new SSLProtocolException("Handshake message sequence violation, state = " + state + ", type = " + type);
}
// Parse the type-specific message and dispatch to its handler.
switch (type) {
case HandshakeMessage.ht_client_hello:
    ClientHello ch = new ClientHello(input, message_len);
    // send it off for processing
    this.clientHello(ch);
    break;
// NPN_CHANGES_BEGIN
case NextProtocolMessage.ID:
    nextProtocol(new NextProtocolMessage(input));
    break;
// NPN_CHANGES_END
case HandshakeMessage.ht_certificate:
    // A client certificate is only legal if we actually requested one.
    if (doClientAuth == SSLEngineImpl.clauth_none) {
        fatalSE(Alerts.alert_unexpected_message, "client sent unsolicited cert chain");
        // NOTREACHED
    }
    this.clientCertificate(new CertificateMsg(input));
    break;
case HandshakeMessage.ht_client_key_exchange:
    SecretKey preMasterSecret;
    // The wire format of the key exchange depends on the negotiated
    // cipher suite's key-exchange algorithm.
    switch (keyExchange) {
    case K_RSA:
    case K_RSA_EXPORT:
        // The client's pre-master secret is decrypted using either the
        // server's normal private RSA key, or the temporary one used for
        // non-export or signing-only certificates/keys.
        RSAClientKeyExchange pms = new RSAClientKeyExchange(protocolVersion, clientRequestedVersion, sslContext.getSecureRandom(), input, message_len, privateKey);
        preMasterSecret = this.clientKeyExchange(pms);
        break;
    case K_KRB5:
    case K_KRB5_EXPORT:
        preMasterSecret = this.clientKeyExchange(new KerberosClientKeyExchange(protocolVersion, clientRequestedVersion, sslContext.getSecureRandom(), input, kerberosKeys));
        break;
    case K_DHE_RSA:
    case K_DHE_DSS:
    case K_DH_ANON:
        // The pre-master secret is derived using the normal Diffie-Hellman
        // calculation. Note that the main protocol difference in these
        // flavors is in how the ServerKeyExchange message was constructed!
        preMasterSecret = this.clientKeyExchange(new DHClientKeyExchange(input));
        break;
    case K_ECDH_RSA:
    case K_ECDH_ECDSA:
    case K_ECDHE_RSA:
    case K_ECDHE_ECDSA:
    case K_ECDH_ANON:
        preMasterSecret = this.clientKeyExchange(new ECDHClientKeyExchange(input));
        break;
    default:
        throw new SSLProtocolException("Unrecognized key exchange: " + keyExchange);
    }
    // All keys are calculated from the premaster secret
    // and the exchanged nonces in the same way.
    calculateKeys(preMasterSecret, clientRequestedVersion);
    break;
case HandshakeMessage.ht_certificate_verify:
    this.clientCertificateVerify(new CertificateVerify(input, localSupportedSignAlgs, protocolVersion));
    break;
case HandshakeMessage.ht_finished:
    // A ChangeCipherSpec record must have been received prior to
    // reception of the Finished message (RFC 5246, 7.4.9).
    if (!receivedChangeCipherSpec()) {
        fatalSE(Alerts.alert_handshake_failure, "Received Finished message before ChangeCipherSpec");
    }
    this.clientFinished(new Finished(protocolVersion, input, cipherSuite));
    break;
default:
    throw new SSLProtocolException("Illegal server handshake msg, " + type);
}
// Move state machine forward if the message handling
// code didn't already do so
if (state < type) {
    if (type == HandshakeMessage.ht_certificate_verify) {
        // certificate_verify (15) logically follows client_key_exchange (16),
        // so skip ahead past it: an annoying special case.
        state = type + 2;
    // NPN_CHANGES_START
    } else if (type == NextProtocolMessage.ID) {
        // NPN messages are out-of-band with respect to the numeric
        // ordering; do not advance the state machine for them.
    // NPN_CHANGES_END
    } else {
        state = type;
    }
}
public class MarkLogicRepositoryConnection { /** * sets default graph permissions to be used by all queries * @ param graphPerms */ @ Override public void setDefaultGraphPerms ( GraphPermissions graphPerms ) { } }
if ( notNull ( graphPerms ) ) { this . defaultGraphPerms = graphPerms ; } else { this . defaultGraphPerms = client . emptyGraphPerms ( ) ; }
public class Utils { /** * Checks whether given given { @ link Tuple } s have equal * { @ link Tuple # getSize ( ) size } , and throws an * < code > IllegalArgumentException < / code > if not . * @ param t0 The first tuple * @ param t1 The second tuple * @ throws IllegalArgumentException If the given tuples * do not have the same { @ link Tuple # getSize ( ) size } */ public static void checkForEqualSize ( Tuple t0 , Tuple t1 ) { } }
if ( t0 . getSize ( ) != t1 . getSize ( ) ) { throw new IllegalArgumentException ( "Sizes do not match: " + t0 . getSize ( ) + " and " + t1 . getSize ( ) ) ; }
public class FlexBase64 { /** * Creates an InputStream wrapper which encodes a source into base64 as it is read , until the source hits EOF . * Upon hitting EOF , a standard base64 termination sequence will be readable . Clients can simply treat this input * stream as if they were reading from a base64 encoded file . This stream attempts to read and encode in buffer * size chunks from the source , in order to improve overall performance . Thus , BufferInputStream is not necessary * and will lead to double buffering . * < p > This stream is not thread - safe , and should not be shared between threads , without establishing a * happens - before relationship . < / p > * @ param source an input source to read from * @ param bufferSize the chunk size to buffer from the source * @ param wrap whether or not the stream should wrap base64 output at 76 characters * @ return an encoded input stream instance . */ public static EncoderInputStream createEncoderInputStream ( InputStream source , int bufferSize , boolean wrap ) { } }
return new EncoderInputStream ( source , bufferSize , wrap , false ) ;
public class ImplicitObjects { /** * Creates the Map that maps parameter name to single parameter * value . */ public static Map createParamMap ( PageContext pContext ) { } }
final HttpServletRequest request = ( HttpServletRequest ) pContext . getRequest ( ) ; return new EnumeratedMap ( ) { public Enumeration enumerateKeys ( ) { return request . getParameterNames ( ) ; } public Object getValue ( Object pKey ) { if ( pKey instanceof String ) { return request . getParameter ( ( String ) pKey ) ; } else { return null ; } } public boolean isMutable ( ) { return false ; } } ;
public class FuzzyAllenIntervalConstraint { /** * Get current possibilities of all Allen relations ( types ) . * @ return The current possibilities of all Allen relations . */ public HashMap < FuzzyAllenIntervalConstraint . Type , Double > getPossibilities ( ) { } }
HashMap < FuzzyAllenIntervalConstraint . Type , Double > fr = new HashMap < FuzzyAllenIntervalConstraint . Type , Double > ( ) ; for ( Type t : Type . values ( ) ) fr . put ( t , 0.0 ) ; for ( Type type : types ) { for ( int t = 0 ; t < FuzzyAllenIntervalConstraint . freksa_neighbor [ type . ordinal ( ) ] . length ; t ++ ) if ( fr . get ( FuzzyAllenIntervalConstraint . lookupTypeByInt ( t ) ) != null ) { fr . put ( FuzzyAllenIntervalConstraint . lookupTypeByInt ( t ) , Math . max ( fr . get ( FuzzyAllenIntervalConstraint . lookupTypeByInt ( t ) ) , FuzzyAllenIntervalConstraint . getPossibilityDegree ( FuzzyAllenIntervalConstraint . freksa_neighbor [ type . ordinal ( ) ] [ t ] ) ) ) ; } else { fr . put ( FuzzyAllenIntervalConstraint . lookupTypeByInt ( t ) , FuzzyAllenIntervalConstraint . getPossibilityDegree ( FuzzyAllenIntervalConstraint . freksa_neighbor [ type . ordinal ( ) ] [ t ] ) ) ; } } return fr ;
public class ClientService { /** * This function deletes a client , but its transactions are not deleted . * @ param client * A { @ link Client } with Id . */ public void delete ( Client client ) { } }
RestfulUtils . delete ( ClientService . PATH , client , Client . class , super . httpClient ) ;
public class Assert { /** * Asserts that the { @ link Thread # currentThread ( ) current Thread } holds the specified { @ link Object lock } . * The assertion holds if and only if the { @ link Object lock } is not { @ literal null } * and the { @ link Thread # currentThread ( ) current Thread } holds the given { @ link Object lock } . * @ param lock { @ link Object } used as the lock , monitor or mutex in the synchronization . * @ param message { @ link String } containing the message used in the { @ link IllegalMonitorStateException } thrown * if the assertion fails . * @ param arguments array of { @ link Object arguments } used as placeholder values * when formatting the { @ link String message } . * @ throws java . lang . IllegalMonitorStateException if the { @ link Thread # currentThread ( ) current Thread } * does not hold the { @ link Object lock } or the { @ link Object lock } is { @ literal null } . * @ see # holdsLock ( Object , RuntimeException ) * @ see java . lang . Thread # holdsLock ( Object ) */ public static void holdsLock ( Object lock , String message , Object ... arguments ) { } }
holdsLock ( lock , new IllegalMonitorStateException ( format ( message , arguments ) ) ) ;
public class TransactionRequestProcessor { /** * Checks if the transaction was already prepared in another node * The client can send multiple requests to the server ( in case of timeout or similar ) . This request is ignored when * ( 1 ) the originator is still alive ; ( 2 ) the transaction is prepared or committed / rolled - back * If the transaction isn ' t prepared and the originator left the cluster , the previous transaction is rolled - back and * a new one is started . */ private boolean checkExistingTxForPrepare ( HotRodHeader header , PrepareCoordinator txCoordinator ) { } }
TxState txState = txCoordinator . getTxState ( ) ; if ( txState == null ) { return false ; } if ( txCoordinator . isAlive ( txState . getOriginator ( ) ) ) { // transaction started on another node but the node is still in the topology . 2 possible scenarios : // #1 , the topology isn ' t updated // #2 , the client timed - out waiting for the reply // in any case , we send a ignore reply and the client is free to retry ( or rollback ) writeNotExecuted ( header ) ; return true ; } // originator is dead . . . // First phase state machine // success ACTIVE - > PREPARING - > PREPARED // failed ACTIVE - > MARK _ ROLLBACK - > ROLLED _ BACK or ACTIVE - > PREPARING - > ROLLED _ BACK // 1PC success ACTIVE - > PREPARING - > MARK _ COMMIT - > COMMITTED switch ( txState . getStatus ( ) ) { case ACTIVE : case PREPARING : // rollback existing transaction and retry with a new one txCoordinator . rollbackRemoteTransaction ( txState . getGlobalTransaction ( ) ) ; return false ; case PREPARED : // 2PC since 1PC never reaches this state writeResponse ( header , createTransactionResponse ( header , XAResource . XA_OK ) ) ; return true ; case MARK_ROLLBACK : // make sure it is rolled back and reply to the client txCoordinator . rollbackRemoteTransaction ( txState . getGlobalTransaction ( ) ) ; case ROLLED_BACK : writeResponse ( header , createTransactionResponse ( header , XAException . XA_RBROLLBACK ) ) ; return true ; case MARK_COMMIT : writeResponse ( header , createTransactionResponse ( header , txCoordinator . onePhaseCommitRemoteTransaction ( txState . getGlobalTransaction ( ) , txState . getModifications ( ) ) ) ) ; return true ; case COMMITTED : writeResponse ( header , createTransactionResponse ( header , XAResource . XA_OK ) ) ; return true ; default : throw new IllegalStateException ( ) ; }
public class HtmlUtils {
    /**
     * Detect simple HTML contained inside of the given <code>value</code> string.
     *
     * <p>A string is considered to contain HTML as soon as it holds any
     * character that is significant in HTML source: a tag opener
     * ({@code '<'}), an entity starter ({@code '&'}), or a double quote
     * ({@code '"'}).</p>
     *
     * @param value the value to inspect; must not be {@code null}
     * @return <code>true</code> if the string contains HTML; <code>false</code> otherwise.
     */
    public static boolean containsHtml(String value) {
        // Improvement over the original: the verbose per-character switch with
        // three identical "return true" arms (and a reused, outer-scope char
        // variable) is collapsed into a single boolean condition.
        final int numChars = value.length();
        for (int i = 0; i < numChars; i++) {
            final char c = value.charAt(i);
            if (c == '<' || c == '&' || c == '"') {
                return true;
            }
        }
        return false;
    }
}
public class StreamEx { /** * Returns a stream consisting of the elements of this stream that don ' t * equal to the given value . * This is an < a href = " package - summary . html # StreamOps " > intermediate < / a > * operation . * @ param value the value to remove from the stream . If the value is null * then all nulls will be removed ( like { @ link # nonNull ( ) } works ) . * Otherwise { @ code value . equals ( ) } will be used to test stream * values and matching elements will be removed . * @ return the new stream * @ since 0.2.2 * @ see # without ( Object . . . ) * @ see # remove ( Predicate ) */ public StreamEx < T > without ( T value ) { } }
if ( value == null ) return filter ( Objects :: nonNull ) ; return remove ( value :: equals ) ;
public class JobResource { /** * The Python - language Lambda functions for this job . * < b > NOTE : < / b > This method appends the values to the existing list ( if any ) . Use * { @ link # setLambdaResources ( java . util . Collection ) } or { @ link # withLambdaResources ( java . util . Collection ) } if you * want to override the existing values . * @ param lambdaResources * The Python - language Lambda functions for this job . * @ return Returns a reference to this object so that method calls can be chained together . */ public JobResource withLambdaResources ( LambdaResource ... lambdaResources ) { } }
if ( this . lambdaResources == null ) { setLambdaResources ( new java . util . ArrayList < LambdaResource > ( lambdaResources . length ) ) ; } for ( LambdaResource ele : lambdaResources ) { this . lambdaResources . add ( ele ) ; } return this ;
public class MODCAInterchangeSetImpl { /** * < ! - - begin - user - doc - - > * < ! - - end - user - doc - - > * @ generated */ public void setISid ( Integer newISid ) { } }
Integer oldISid = iSid ; iSid = newISid ; if ( eNotificationRequired ( ) ) eNotify ( new ENotificationImpl ( this , Notification . SET , AfplibPackage . MODCA_INTERCHANGE_SET__ISID , oldISid , iSid ) ) ;
public class GvmClusters { /** * Collapses the number of clusters subject to constraints on the maximum * permitted variance , and the least number of clusters . This method may be * called at any time , including between calls to add ( ) . * @ param maxVar * an upper bound on the global variance that may not be exceeded * by merging clusters * @ param minClusters * a lower bound on the the number of clusters that may not be * exceeded by merging clusters */ public void reduce ( double maxVar , int minClusters ) { } }
if ( minClusters < 0 ) throw new IllegalArgumentException ( "negative minClusters" ) ; if ( count <= minClusters ) return ; // nothing to do double totalVar = 0.0 ; double totalMass = 0.0 ; for ( int i = 0 ; i < count ; i ++ ) { GvmCluster < S , K > cluster = clusters [ i ] ; totalVar += cluster . var ; totalMass += cluster . m0 ; } while ( count > minClusters ) { if ( count == 1 ) { // remove the last cluster for ( int i = 0 ; i < bound ; i ++ ) { GvmCluster < S , K > c = clusters [ i ] ; if ( ! c . removed ) { c . removed = true ; break ; } } } else { GvmClusterPair < S , K > mergePair = pairs . peek ( ) ; GvmCluster < S , K > c1 = mergePair . c1 ; GvmCluster < S , K > c2 = mergePair . c2 ; if ( c1 . m0 < c2 . m0 ) { c1 = c2 ; c2 = mergePair . c1 ; } if ( maxVar >= 0.0 ) { double diff = c1 . test ( c2 ) - c1 . var - c2 . var ; totalVar += diff ; if ( totalVar / totalMass > maxVar ) break ; // stop here , we are going to exceed maximum } c1 . key = keyer . mergeKeys ( c1 , c2 ) ; c1 . add ( c2 ) ; updatePairs ( c1 ) ; removePairs ( c2 ) ; c2 . removed = true ; } count -- ; } // iterate over clusters and remove dead clusters { int j = 0 ; for ( int i = 0 ; i < bound ; ) { boolean lose = clusters [ i ] . removed ; if ( lose ) { i ++ ; } else { if ( i != j ) clusters [ j ] = clusters [ i ] ; i ++ ; j ++ ; } } for ( ; j < bound ; j ++ ) { clusters [ j ] = null ; } } // iterate over cluster pairs and remove dead pairs for ( int i = 0 ; i < count ; i ++ ) { GvmCluster < S , K > cluster = clusters [ i ] ; GvmClusterPair < S , K > [ ] pairs = cluster . pairs ; int k = 0 ; for ( int j = 0 ; j < bound - 1 ; ) { GvmClusterPair < S , K > pair = pairs [ j ] ; boolean lose = pair . c1 . removed || pair . c2 . removed ; if ( lose ) { j ++ ; } else { if ( j != k ) pairs [ k ] = pairs [ j ] ; k ++ ; j ++ ; } } for ( ; k < bound ; k ++ ) { pairs [ k ] = null ; } } bound = count ;
public class StorePath { /** * 获取Group名称 * @ param filePath * @ return */ private static String getGroupName ( String filePath ) { } }
// 先分隔开路径 String [ ] paths = filePath . split ( SPLIT_GROUP_NAME_AND_FILENAME_SEPERATOR ) ; if ( paths . length == 1 ) { throw new FdfsUnsupportStorePathException ( "解析文件路径错误,有效的路径样式为(group/path) 而当前解析路径为" . concat ( filePath ) ) ; } System . out . println ( paths ) ; for ( String item : paths ) { if ( item . indexOf ( SPLIT_GROUP_NAME ) != - 1 ) { return item ; } } throw new FdfsUnsupportStorePathException ( "解析文件路径错误,被解析路径url没有group,当前解析路径为" . concat ( filePath ) ) ;
public class RangeCondition { /** * { @ inheritDoc } */ @ Override public Query query ( Schema schema ) { } }
// A range condition is meaningless without a target field.
if (field == null || field.trim().isEmpty()) {
    throw new IllegalArgumentException("Field name required");
}
// The column mapper determines the base Java type of the field, which in
// turn selects the matching Lucene range-query flavour below.
ColumnMapperSingle<?> columnMapper = getMapper(schema, field);
Class<?> clazz = columnMapper.baseClass();
Query query;
if (clazz == String.class) {
    String lower = (String) columnMapper.queryValue(field, this.lower);
    String upper = (String) columnMapper.queryValue(field, this.upper);
    // String bounds are passed through the field analyzer before use.
    if (lower != null) {
        lower = analyze(field, lower, schema);
    }
    if (upper != null) {
        upper = analyze(field, upper, schema);
    }
    query = TermRangeQuery.newStringRange(field, lower, upper, includeLower, includeUpper);
} else if (clazz == Integer.class) {
    Integer lower = (Integer) columnMapper.queryValue(field, this.lower);
    Integer upper = (Integer) columnMapper.queryValue(field, this.upper);
    query = NumericRangeQuery.newIntRange(field, lower, upper, includeLower, includeUpper);
} else if (clazz == Long.class) {
    Long lower = (Long) columnMapper.queryValue(field, this.lower);
    Long upper = (Long) columnMapper.queryValue(field, this.upper);
    query = NumericRangeQuery.newLongRange(field, lower, upper, includeLower, includeUpper);
} else if (clazz == Float.class) {
    Float lower = (Float) columnMapper.queryValue(field, this.lower);
    Float upper = (Float) columnMapper.queryValue(field, this.upper);
    query = NumericRangeQuery.newFloatRange(field, lower, upper, includeLower, includeUpper);
} else if (clazz == Double.class) {
    Double lower = (Double) columnMapper.queryValue(field, this.lower);
    Double upper = (Double) columnMapper.queryValue(field, this.upper);
    query = NumericRangeQuery.newDoubleRange(field, lower, upper, includeLower, includeUpper);
} else {
    // Any other mapped type has no range-query support.
    String message = String.format("Range queries are not supported by %s mapper", clazz.getSimpleName());
    throw new UnsupportedOperationException(message);
}
query.setBoost(boost);
return query;
public class ObjectMapperFactory { /** * @ see # setSubtypes ( NamedType . . . ) * @ param subtypes the { @ link NamedType } s to add to { @ link # setSubtypeList ( List ) sub - type list } for registration . */ public void addSubtypes ( NamedType ... subtypes ) { } }
if ( this . subtypeList == null ) { this . subtypeList = new ArrayList < > ( ) ; } this . subtypeList . addAll ( Arrays . asList ( subtypes ) ) ;
public class Clustering { /** * Add a cluster to the clustering . * @ param parent Parent cluster * @ param child Child cluster . */ public void addChildCluster ( Cluster < M > parent , Cluster < M > child ) { } }
hierarchy . add ( parent , child ) ;
public class ArrayIterate { /** * Returns the first element of an array . This method is null safe . */ public static < T > T getFirst ( T [ ] objectArray ) { } }
if ( ArrayIterate . notEmpty ( objectArray ) ) { return objectArray [ 0 ] ; } return null ;
public class Providers { /** * sun . security . util . ManifestEntryVerifier and java . security . SecureRandom . */ public static Provider getSunProvider ( ) { } }
try { Class < ? > clazz = Class . forName ( jarVerificationProviders [ 0 ] ) ; return ( Provider ) clazz . newInstance ( ) ; } catch ( Exception e ) { try { Class < ? > clazz = Class . forName ( BACKUP_PROVIDER_CLASSNAME ) ; return ( Provider ) clazz . newInstance ( ) ; } catch ( Exception ee ) { throw new RuntimeException ( "Sun provider not found" , e ) ; } }
public class ImageLoader { /** * Loads and decodes image synchronously . < br / > * < b > NOTE : < / b > { @ link # init ( ImageLoaderConfiguration ) } method must be called before this method call * @ param uri Image URI ( i . e . " http : / / site . com / image . png " , " file : / / / mnt / sdcard / image . png " ) * @ param targetImageSize Minimal size for { @ link Bitmap } which will be returned . Downloaded image will be decoded * and scaled to { @ link Bitmap } of the size which is < b > equal or larger < / b > ( usually a bit * larger ) than incoming targetImageSize . * @ param options { @ linkplain com . nostra13 . universalimageloader . core . DisplayImageOptions Options } for image * decoding and scaling . If < b > null < / b > - default display image options * { @ linkplain ImageLoaderConfiguration . Builder # defaultDisplayImageOptions ( DisplayImageOptions ) * from configuration } will be used . * @ return Result image Bitmap . Can be < b > null < / b > if image loading / decoding was failed or cancelled . * @ throws IllegalStateException if { @ link # init ( ImageLoaderConfiguration ) } method wasn ' t called before */ public Bitmap loadImageSync ( String uri , ImageSize targetImageSize , DisplayImageOptions options ) { } }
if ( options == null ) { options = configuration . defaultDisplayImageOptions ; } options = new DisplayImageOptions . Builder ( ) . cloneFrom ( options ) . syncLoading ( true ) . build ( ) ; SyncImageLoadingListener listener = new SyncImageLoadingListener ( ) ; loadImage ( uri , targetImageSize , options , listener ) ; return listener . getLoadedBitmap ( ) ;
public class HBCIDialog { /** * < p > Ausführen aller bisher erzeugten Aufträge . Diese Methode veranlasst den HBCI - Kernel , * die Aufträge , die durch die Aufrufe auszuführen . < / p > * @ return ein Status - Objekt , anhand dessen der Erfolg oder das Fehlschlagen * der Dialoge festgestellt werden kann . */ public HBCIExecStatus execute ( boolean closeDialog ) { } }
HBCIExecStatus ret = new HBCIExecStatus ( ) ; log . debug ( "executing dialog" ) ; try { ret . setDialogStatus ( doIt ( closeDialog ) ) ; } catch ( Exception e ) { ret . addException ( e ) ; } return ret ;
public class SCCs { /** * Find all strongly - connected components in a graph . When a new SCC is found , the { @ link * SCCListener # foundSCC ( java . util . Collection ) } method is invoked . The listener object may hence not be null . * Tarjan ' s algorithm is used for realizing the SCC search . * @ param graph * the graph * @ param listener * the SCC listener * @ see TarjanSCCVisitor */ public static < N , E > void findSCCs ( Graph < N , E > graph , SCCListener < N > listener ) { } }
TarjanSCCVisitor < N , E > vis = new TarjanSCCVisitor < > ( graph , listener ) ; for ( N node : graph ) { if ( ! vis . hasVisited ( node ) ) { GraphTraversal . depthFirst ( graph , node , vis ) ; } }
public class Ifc4PackageImpl { /** * < ! - - begin - user - doc - - > * < ! - - end - user - doc - - > * @ generated */ @ Override public EEnum getIfcSequenceEnum ( ) { } }
if ( ifcSequenceEnumEEnum == null ) { ifcSequenceEnumEEnum = ( EEnum ) EPackage . Registry . INSTANCE . getEPackage ( Ifc4Package . eNS_URI ) . getEClassifiers ( ) . get ( 1061 ) ; } return ifcSequenceEnumEEnum ;
public class AbstractExecutableMemberWriter { /** * Get the type parameters for the executable member . * @ param member the member for which to get the type parameters . * @ return the type parameters . */ protected Content getTypeParameters ( ExecutableElement member ) { } }
LinkInfoImpl linkInfo = new LinkInfoImpl ( configuration , MEMBER_TYPE_PARAMS , member ) ; return writer . getTypeParameterLinks ( linkInfo ) ;
public class MListTable { /** * Retourne la liste d ' objets sélectionnés . * @ return List * @ see # setSelectedList */ public List < T > getSelectedList ( ) { } }
final int [ ] selectedRows = getSelectedRows ( ) ; int selectedRow ; final int rowCount = getRowCount ( ) ; final int length = selectedRows . length ; final List < T > selectedList = new ArrayList < > ( length ) ; for ( int i = 0 ; i < length ; i ++ ) { selectedRow = selectedRows [ i ] ; // getSelectedRows peut renvoyer des lignes qui ne sont plus // dans la table si ce sont les denières sélectionnées if ( selectedRow >= 0 && selectedRow < rowCount ) { selectedList . add ( getObjectAt ( selectedRow ) ) ; } } return selectedList ;
public class TreeReaderRegistry { /** * Deregisters the given format from the registry . * @ param format * name of the format ( eg . " custom " , " csv " , etc . ) */ public static final void removeReader ( String format ) { } }
String key = format . toLowerCase ( ) ; if ( key . equals ( JSON ) ) { throw new IllegalArgumentException ( "Unable to delete the default JSON reader!" ) ; } readers . remove ( key ) ;
public class CurationManager {

    /**
     * Get the workflow data payload (ending in '.tfpackage') from the object
     * identified by the given OID and parse it as JSON.
     *
     * @param oid the identifier of the digital object holding the payload
     * @return the parsed payload as a {@link JsonSimple}, or {@code null} if
     *         the object or payload could not be accessed or parsed (errors
     *         are logged rather than thrown)
     */
    private JsonSimple getWorkflowData(String oid) {
        // Get our data from Storage
        Payload payload = null;
        try {
            DigitalObject object = storage.getObject(oid);
            payload = object.getPayload(WORKFLOW_PAYLOAD);
        } catch (StorageException ex) {
            log.error("Error accessing object '{}' in storage: ", oid, ex);
            return null;
        }
        // Parse the JSON
        try {
            try {
                return new JsonSimple(payload.open());
            } catch (IOException ex) {
                log.error("Error parsing workflow '{}': ", oid, ex);
                return null;
            } finally {
                // Always release the payload stream, even on parse failure.
                payload.close();
            }
        } catch (StorageException ex) {
            // open()/close() can fail at the storage layer independently of parsing.
            log.error("Error accessing workflow '{}' in storage: ", oid, ex);
            return null;
        }
    }
}
public class ApiOvhEmailexchange { /** * Generate outlook url * REST : POST / email / exchange / { organizationName } / service / { exchangeService } / account / { primaryEmailAddress } / outlookURL * @ param version [ required ] Version of outlook * @ param language [ required ] Language of outlook * @ param organizationName [ required ] The internal name of your exchange organization * @ param exchangeService [ required ] The internal name of your exchange service * @ param primaryEmailAddress [ required ] Default email for this mailbox */ public OvhTask organizationName_service_exchangeService_account_primaryEmailAddress_outlookURL_POST ( String organizationName , String exchangeService , String primaryEmailAddress , OvhLanguageEnum language , OvhOutlookVersionEnum version ) throws IOException { } }
String qPath = "/email/exchange/{organizationName}/service/{exchangeService}/account/{primaryEmailAddress}/outlookURL" ; StringBuilder sb = path ( qPath , organizationName , exchangeService , primaryEmailAddress ) ; HashMap < String , Object > o = new HashMap < String , Object > ( ) ; addBody ( o , "language" , language ) ; addBody ( o , "version" , version ) ; String resp = exec ( qPath , "POST" , sb . toString ( ) , o ) ; return convertTo ( resp , OvhTask . class ) ;
public class BulkSQSOperation { /** * Bulk action on list of message identifiers up to the provided index * @ param messageIdentifierList * Container for the list of message identifiers * @ param indexOfMessage * The action will apply to all messages up to this index * @ throws JMSException * if < code > action < / code > throws */ public void bulkAction ( List < SQSMessageIdentifier > messageIdentifierList , int indexOfMessage ) throws JMSException { } }
assert indexOfMessage > 0 ; assert indexOfMessage <= messageIdentifierList . size ( ) ; Map < String , List < String > > receiptHandleWithSameQueueUrl = new HashMap < String , List < String > > ( ) ; // Add all messages up to and including requested message into Map . // Map contains key as queueUrl and value as list receiptHandles from // that queueUrl . for ( int i = 0 ; i < indexOfMessage ; i ++ ) { SQSMessageIdentifier messageIdentifier = messageIdentifierList . get ( i ) ; String queueUrl = messageIdentifier . getQueueUrl ( ) ; List < String > receiptHandles = receiptHandleWithSameQueueUrl . get ( queueUrl ) ; // if value of queueUrl is null create new list . if ( receiptHandles == null ) { receiptHandles = new ArrayList < String > ( ) ; receiptHandleWithSameQueueUrl . put ( queueUrl , receiptHandles ) ; } // add receiptHandle to the list . receiptHandles . add ( messageIdentifier . getReceiptHandle ( ) ) ; // Once there are 10 messages in messageBatch , apply the batch action if ( receiptHandles . size ( ) == SQSMessagingClientConstants . MAX_BATCH ) { action ( queueUrl , receiptHandles ) ; receiptHandles . clear ( ) ; } } // Flush rest of messages in map . for ( Entry < String , List < String > > entry : receiptHandleWithSameQueueUrl . entrySet ( ) ) { action ( entry . getKey ( ) , entry . getValue ( ) ) ; }
public class IntBuffer { /** * Compares the remaining ints of this buffer to another int buffer ' s remaining ints . * @ param otherBuffer another int buffer . * @ return a negative value if this is less than { @ code other } ; 0 if this equals to { @ code * other } ; a positive value if this is greater than { @ code other } . * @ exception ClassCastException if { @ code other } is not an int buffer . */ public int compareTo ( IntBuffer otherBuffer ) { } }
int compareRemaining = ( remaining ( ) < otherBuffer . remaining ( ) ) ? remaining ( ) : otherBuffer . remaining ( ) ; int thisPos = position ; int otherPos = otherBuffer . position ; // BEGIN android - changed int thisInt , otherInt ; while ( compareRemaining > 0 ) { thisInt = get ( thisPos ) ; otherInt = otherBuffer . get ( otherPos ) ; if ( thisInt != otherInt ) { return thisInt < otherInt ? - 1 : 1 ; } thisPos ++ ; otherPos ++ ; compareRemaining -- ; } // END android - changed return remaining ( ) - otherBuffer . remaining ( ) ;
public class Objects { /** * Checks boolean expression . * @ param expression a boolean expression * @ param message error message * @ param messageArgs array of parameters to the message * @ throws IllegalArgumentException if boolean expression is false */ public static void validArgument ( boolean expression , String message , Object ... messageArgs ) { } }
if ( ! expression ) { throw new IllegalArgumentException ( format ( message , messageArgs ) ) ; }
public class CmsResourceInfoDialog {

    /**
     * Initializes the file info: stores the requested resource path and reads
     * its Title property (falling back to "-" when unset).<p>
     */
    protected void initFileInfo() {
        try {
            // read the Title property of the requested resource path
            // (NOTE(review): the original "edit an existing user" comment was a
            // copy-paste leftover and did not describe this code)
            m_path = getParamResource();
            m_title = getCms().readPropertyObject(m_path, CmsPropertyDefinition.PROPERTY_TITLE, false).getValue("-");
        } catch (CmsException e) {
            // best effort: if the property cannot be read, keep the defaults
        }
    }
}
public class PhaxioFaxClientSpi {

    /**
     * Initializes the mapping from fax action types to their Phaxio REST resources.
     *
     * @return the fax action type to resource mappings
     */
    @Override
    protected Map<FaxActionType, String> initializeFaxActionType2ResourceMap() {
        // Map each supported action onto its Phaxio v1 endpoint.
        Map<FaxActionType, String> resources = new HashMap<FaxActionType, String>();
        resources.put(FaxActionType.SUBMIT_FAX_JOB, "/v1/send");
        resources.put(FaxActionType.CANCEL_FAX_JOB, "/v1/faxCancel");
        resources.put(FaxActionType.GET_FAX_JOB_STATUS, "/v1/faxStatus");
        return resources;
    }
}
public class Lists { /** * An implementation of { @ link List # indexOf ( Object ) } . */ static int indexOfImpl ( List < ? > list , @ Nullable Object element ) { } }
ListIterator < ? > listIterator = list . listIterator ( ) ; while ( listIterator . hasNext ( ) ) { if ( Objects . equal ( element , listIterator . next ( ) ) ) { return listIterator . previousIndex ( ) ; } } return - 1 ;
public class ColumnPrinter { /** * Add a value to the nth column * @ param columnIndex n * @ param value value to add */ void addValue ( int columnIndex , String value ) { } }
if ( ( columnIndex < 0 ) || ( columnIndex >= data . size ( ) ) ) { throw new IllegalArgumentException ( ) ; } List < String > stringList = data . get ( columnIndex ) ; stringList . add ( value ) ;
public class SIXAResourceProxy { /** * Called when the transaction manager would like us to * prepare to complete the transaction . * Across the wire we will flow : * BIT32 Transaction Id * The XID Structure * @ param xid * @ return Returns the result of the resource manager ' s vote on whether * we can commit . This will be XA _ RDONLY if we have done no work * as part of this transaction , or XA _ OK . * @ throws XAException if an exception is thrown at the ME . In the * event of a comms failure , an XAException with XAER _ RMFAIL will * be thrown . */ public int prepare ( Xid xid ) throws XAException { } }
if ( TraceComponent . isAnyTracingEnabled ( ) && tc . isEntryEnabled ( ) ) SibTr . entry ( this , tc , "prepare" , xid ) ; final int result = internalPrepare ( xid ) ; if ( TraceComponent . isAnyTracingEnabled ( ) && tc . isEntryEnabled ( ) ) SibTr . exit ( this , tc , "prepare" , "" + result ) ; return result ;
public class BondManipulator { /** * Get the single bond equivalent ( SBE ) of a list of bonds , given an iterator to the list . * @ param bonds An iterator to the list of bonds * @ return The SBE sum */ public static int getSingleBondEquivalentSum ( Iterator < IBond > bonds ) { } }
int sum = 0 ; while ( bonds . hasNext ( ) ) { IBond . Order order = bonds . next ( ) . getOrder ( ) ; if ( order != null ) { sum += order . numeric ( ) ; } } return sum ;
public class SubPlanAssembler {

    /**
     * Get an index scan access plan for a table.
     *
     * @param tableScan the table to get data from
     * @param path      the access path to access the data in the table (index/scan/etc.)
     * @return an index scan plan node OR, in one edge case (an IN-list lookup),
     *         an NLIJ of a MaterializedScan and an index scan plan node
     */
    private static AbstractPlanNode getIndexAccessPlanForTable(StmtTableScan tableScan, AccessPath path) {
        // now assume this will be an index scan and get the relevant index
        Index index = path.index;
        IndexScanPlanNode scanNode = new IndexScanPlanNode(tableScan, index);
        AbstractPlanNode resultNode = scanNode;
        // set sortDirection here because it might be used for IN list
        scanNode.setSortDirection(path.sortDirection);
        // Build the list of search-keys for the index in question.
        // They are the rhs expressions of normalized indexExpr comparisons
        // except for geo indexes. For geo indexes, the search key is directly
        // the one element of indexExprs.
        for (AbstractExpression expr : path.indexExprs) {
            if (path.lookupType == IndexLookupType.GEO_CONTAINS) {
                scanNode.addSearchKeyExpression(expr);
                scanNode.addCompareNotDistinctFlag(false);
                continue;
            }
            AbstractExpression exprRightChild = expr.getRight();
            assert (exprRightChild != null);
            if (expr.getExpressionType() == ExpressionType.COMPARE_IN) {
                // Replace this method's result with an injected NLIJ.
                resultNode = injectIndexedJoinWithMaterializedScan(exprRightChild, scanNode);
                // Extract a TVE from the LHS MaterializedScan for use by the IndexScan in its new role.
                MaterializedScanPlanNode matscan = (MaterializedScanPlanNode) resultNode.getChild(0);
                AbstractExpression elemExpr = matscan.getOutputExpression();
                assert (elemExpr != null);
                // Replace the IN LIST condition in the end expression referencing all the list elements
                // with a more efficient equality filter referencing the TVE for each element in turn.
                replaceInListFilterWithEqualityFilter(path.endExprs, exprRightChild, elemExpr);
                // Set up the similar VectorValue --> TVE replacement of the search key expression.
                exprRightChild = elemExpr;
            }
            if (exprRightChild instanceof AbstractSubqueryExpression) {
                // The AbstractSubqueryExpression must be wrapped up into a
                // ScalarValueExpression which extracts the actual row/column from
                // the subquery
                // ENG-8175: this part of code seems not working for float/varchar type index?!
                // DEAD CODE with the guards on index: ENG-8203
                assert (false);
            }
            scanNode.addSearchKeyExpression(exprRightChild);
            // If the index expression is an "IS NOT DISTINCT FROM" comparison, let the NULL values go through. (ENG-11096)
            scanNode.addCompareNotDistinctFlag(expr.getExpressionType() == ExpressionType.COMPARE_NOTDISTINCT);
        }
        // create the IndexScanNode with all its metadata
        scanNode.setLookupType(path.lookupType);
        scanNode.setBindings(path.bindings);
        scanNode.setEndExpression(ExpressionUtil.combinePredicates(path.endExprs));
        // Partial indexes carry a predicate (stored as JSON) that must be attached to the scan.
        if (!path.index.getPredicatejson().isEmpty()) {
            try {
                scanNode.setPartialIndexPredicate(AbstractExpression.fromJSONString(path.index.getPredicatejson(), tableScan));
            } catch (JSONException e) {
                throw new PlanningErrorException(e.getMessage(), 0);
            }
        }
        scanNode.setPredicate(path.otherExprs);
        // Propagate the sorting information into the scan node from the access path.
        // The initial expression is needed to control a (short?) forward scan to adjust the start of a reverse
        // iteration after it had to initially settle for starting at "greater than a prefix key".
        scanNode.setInitialExpression(ExpressionUtil.combinePredicates(path.initialExpr));
        scanNode.setSkipNullPredicate();
        scanNode.setEliminatedPostFilters(path.eliminatedPostExprs);
        // Record how this index interacts with ORDER BY / window functions for later planning stages.
        final IndexUseForOrderBy indexUse = scanNode.indexUse();
        indexUse.setWindowFunctionUsesIndex(path.m_windowFunctionUsesIndex);
        indexUse.setSortOrderFromIndexScan(path.sortDirection);
        indexUse.setWindowFunctionIsCompatibleWithOrderBy(path.m_stmtOrderByIsCompatible);
        indexUse.setFinalExpressionOrderFromIndexScan(path.m_finalExpressionOrder);
        return resultNode;
    }
}
public class ConfigurationAbstractImpl { /** * Returns the class specified by the value for the specified key . If no * value for this key is found in the configuration , no class of this name * can be found or the specified class is not assignable to each * class / interface in < code > assignables defaultValue < / code > is returned . * @ param key the key * @ param defaultValue the default Value * @ param assignables classes and / or interfaces the specified class must * extend / implement . * @ return the value for the key , or < code > defaultValue < / code > */ public Class getClass ( String key , Class defaultValue , Class [ ] assignables ) { } }
String className = properties . getProperty ( key ) ; if ( className == null ) { if ( defaultValue == null ) { logger . info ( "No value for key '" + key + "'" ) ; return null ; } else { className = defaultValue . getName ( ) ; properties . put ( key , className ) ; logger . debug ( "No value for key \"" + key + "\", using default " + className + "." ) ; return defaultValue ; } } Class clazz = null ; try { clazz = ClassHelper . getClass ( className ) ; } catch ( ClassNotFoundException e ) { clazz = defaultValue ; logger . warn ( "Value \"" + className + "\" is illegal for key \"" + key + "\" (should be a class, using default value " + defaultValue + ")" , e ) ; } for ( int i = 0 ; i < assignables . length ; i ++ ) { Class assignable = assignables [ i ] ; if ( ! assignable . isAssignableFrom ( clazz ) ) { String extendsOrImplements ; if ( assignable . isInterface ( ) ) { extendsOrImplements = "implement the interface " ; } else { extendsOrImplements = "extend the class " ; } logger . error ( "The specified class \"" + className + "\" does not " + extendsOrImplements + assignables [ i ] . getName ( ) + ", which is a requirement for the key \"" + key + "\". Using default class " + defaultValue ) ; clazz = defaultValue ; } } return clazz ;
public class FloatAttribute { /** * BigDecimal */ public FloatAttribute plus ( BigDecimalAttribute attribute ) { } }
return FloatNumericType . getInstance ( ) . createCalculatedAttribute ( FloatNumericType . getInstance ( ) . createAdditionCalculator ( this , attribute ) ) ;
public class AstUtil { /** * Return the AnnotationNode for the named annotation , or else null . * Supports Groovy 1.5 and Groovy 1.6. * @ param node - the AnnotatedNode * @ param name - the name of the annotation * @ return the AnnotationNode or else null */ public static AnnotationNode getAnnotation ( AnnotatedNode node , String name ) { } }
List < AnnotationNode > annotations = node . getAnnotations ( ) ; for ( AnnotationNode annot : annotations ) { if ( annot . getClassNode ( ) . getName ( ) . equals ( name ) ) { return annot ; } } return null ;
public class CookieVersionData { /** * @ see * com . ibm . ws . http . channel . internal . values . CookieData # set ( com . ibm . websphere * . http . HttpCookie , byte [ ] ) */ @ Override public boolean set ( HttpCookie cookie , byte [ ] attribValue ) { } }
try { if ( null != cookie && null != attribValue && 0 < attribValue . length ) { cookie . setVersion ( GenericUtils . asIntValue ( attribValue ) ) ; if ( TraceComponent . isAnyTracingEnabled ( ) && tc . isDebugEnabled ( ) ) { Tr . debug ( tc , "Cookie version set to " + cookie . getVersion ( ) ) ; } return true ; } } catch ( Exception e ) { FFDCFilter . processException ( e , getClass ( ) . getName ( ) , "set" , this ) ; if ( TraceComponent . isAnyTracingEnabled ( ) && tc . isDebugEnabled ( ) ) { Tr . debug ( tc , "Exception setting version; " + e ) ; } } return false ;
public class KafkaProducerPusher { /** * Push all byte array messages to the Kafka topic . * @ param messages List of byte array messages to push to Kakfa . */ public void pushMessages ( List < byte [ ] > messages ) { } }
for ( byte [ ] message : messages ) { producer . send ( new ProducerRecord < > ( topic , message ) , ( recordMetadata , e ) -> { if ( e != null ) { log . error ( "Failed to send message to topic {} due to exception: " , topic , e ) ; } } ) ; }
public class PooledByteBufAllocator { /** * Return the number of thread local caches used by this { @ link PooledByteBufAllocator } . * @ deprecated use { @ link PooledByteBufAllocatorMetric # numThreadLocalCaches ( ) } . */ @ Deprecated public int numThreadLocalCaches ( ) { } }
PoolArena < ? > [ ] arenas = heapArenas != null ? heapArenas : directArenas ; if ( arenas == null ) { return 0 ; } int total = 0 ; for ( PoolArena < ? > arena : arenas ) { total += arena . numThreadCaches . get ( ) ; } return total ;
public class TransformerRegistry { /** * Get the sub registry for the domain . * @ param range the version range * @ return the sub registry */ public TransformersSubRegistration getDomainRegistration ( final ModelVersionRange range ) { } }
final PathAddress address = PathAddress . EMPTY_ADDRESS ; return new TransformersSubRegistrationImpl ( range , domain , address ) ;
public class GlobalExceptionHandler { /** * 全局处理错误 * @ param request current HTTP request * @ param response current HTTP response * @ param handler the executed handler , or < code > null < / code > if none chosen at the time of the exception ( for * example , * if multipart resolution failed ) * @ param ex the exception that got thrown during handler execution * @ return a corresponding ModelAndView to forward to , or < code > null < / code > for default processing * @ see org . springframework . web . servlet . HandlerExceptionResolver # resolveException ( javax . servlet . http * . HttpServletRequest , * javax . servlet . http . HttpServletResponse , java . lang . Object , java . lang . Exception ) */ @ Override public ModelAndView resolveException ( HttpServletRequest request , HttpServletResponse response , Object o , Exception e ) { } }
ModelAndView model = new ModelAndView ( new MappingJacksonJsonView ( ) ) ; try { if ( e instanceof TypeMismatchException ) { LOG . warn ( "TypeMismatchException occurred. " + e . getMessage ( ) ) ; return buildBizErrors ( ( TypeMismatchException ) e , model ) ; } else if ( e instanceof BindException ) { LOG . warn ( "BindException occurred. " + e . getMessage ( ) ) ; return buildBizErrors ( ( BindException ) e , model ) ; } else if ( e instanceof HttpRequestMethodNotSupportedException ) { LOG . warn ( "HttpRequestMethodNotSupportedException occurred. " + e . getMessage ( ) ) ; return buildError ( model , GlobalResponseStatusMsg . REQUEST_HTTP_METHOD_ERROR ) ; } else if ( e instanceof MissingServletRequestParameterException ) { LOG . warn ( "MissingServletRequestParameterException occurred. " + e . getMessage ( ) ) ; return buildError ( model , GlobalResponseStatusMsg . PARAM_MISS_ERROR ) ; } else { LOG . error ( "System error occurred. " + e . getMessage ( ) , e ) ; return buildError ( model , GlobalResponseStatusMsg . SYSTEM_ERROR ) ; } } catch ( Exception ex ) { // Omit all detailed error message including stack trace to external user LOG . error ( "Unexpected error occurred! This should never happen! " + ex . getMessage ( ) , ex ) ; model . addObject ( "status" , SYS_ERROR_CODE ) ; model . addObject ( "msg" , SYS_ERROR_MSG ) ; return model ; }
public class AnnotationDto { /** * Converts list of alert entity objects to list of alertDto objects . * @ param annotations List of alert entities . Cannot be null . * @ return List of alertDto objects . * @ throws WebApplicationException If an error occurs . */ public static List < AnnotationDto > transformToDto ( List < Annotation > annotations ) { } }
if ( annotations == null ) { throw new WebApplicationException ( "Null entity object cannot be converted to Dto object." , Status . INTERNAL_SERVER_ERROR ) ; } List < AnnotationDto > result = new ArrayList < > ( ) ; for ( Annotation annotation : annotations ) { result . add ( transformToDto ( annotation ) ) ; } return result ;
public class CmsContainerpageController {

    /**
     * Replaces the given drag-element with the given container element.<p>
     *
     * @param containerElement the container element to replace
     * @param elementData      the new element data
     * @return the container element which replaced the old one, or null when the
     *         new element data has no non-empty content for this container (in
     *         that case the old element is left in place)
     * @throws Exception if something goes wrong
     */
    public CmsContainerPageElementPanel replaceContainerElement(CmsContainerPageElementPanel containerElement, CmsContainerElementData elementData) throws Exception {
        I_CmsDropContainer parentContainer = containerElement.getParentTarget();
        String containerId = parentContainer.getContainerId();
        CmsContainerPageElementPanel replacer = null;
        String elementContent = elementData.getContents().get(containerId);
        // Only replace when the new data actually renders content for this container.
        if ((elementContent != null) && (elementContent.trim().length() > 0)) {
            replacer = getContainerpageUtil().createElement(elementData, parentContainer, false);
            if (containerElement.isNew()) {
                // if replacing element data has the same structure id, keep the 'new' state by setting the new type property
                // this should only be the case when editing settings of a new element that has not been created in the VFS yet
                String id = getServerId(containerElement.getId());
                if (elementData.getClientId().startsWith(id)) {
                    replacer.setNewType(containerElement.getNewType());
                }
            }
            replacer.setCreateNew(containerElement.isCreateNew());
            // replacer.setModelGroup(containerElement.isModelGroup());
            if (isGroupcontainerEditing() && (containerElement.getInheritanceInfo() != null)) {
                // in case of inheritance container editing, keep the inheritance info
                replacer.setInheritanceInfo(containerElement.getInheritanceInfo());
                // set the proper element options
                CmsInheritanceContainerEditor.getInstance().setOptionBar(replacer);
            }
            // Swap the widgets in place: insert the replacement at the old position,
            // then detach the old element and initialize any nested containers.
            parentContainer.insert(replacer, parentContainer.getWidgetIndex(containerElement));
            containerElement.removeFromParent();
            initializeSubContainers(replacer);
        }
        cleanUpContainers();
        return replacer;
    }
}
public class EJBMDOrchestrator {

    /**
     * Populate the transactionAttrs and activitySessionAttrs arrays from XML
     * metadata in activitySessionList and transactionList and from annotation
     * data in ejbMethods. Defaults are applied first, then XML, then
     * annotations, and finally spec-mandated overrides for special method
     * interfaces (EJBObject methods, timers, lifecycle interceptors).
     */
    private void initializeBeanMethodTransactionAttributes(boolean isEntity, boolean addRemove, MethodInterface methodInterface, Method[] ejbMethods, String[] entityNoTxAttrMethods, String[] entityNoTxAttrMethodSignatures, List<ContainerTransaction> transactionList, List<ActivitySessionMethod> activitySessionList, String[] methodNames, Class<?>[][] methodParamTypes, String[] methodSignatures, TransactionAttribute[] transactionAttrs, ActivitySessionAttribute[] activitySessionAttrs, BeanMetaData bmd) throws EJBConfigurationException {
        final boolean isTraceOn = TraceComponent.isAnyTracingEnabled();
        if (isTraceOn && tc.isEntryEnabled())
            Tr.entry(tc, "initializeBeanMethodTransactionAttributes: " + methodInterface);

        int numMethods = ejbMethods.length;
        // When addRemove is set, the last slot holds an implicitly-added 'remove'
        // method that is handled separately below.
        int numExplicitMethods = numMethods - (addRemove ? 1 : 0);
        for (int i = 0; i < numExplicitMethods; i++) {
            // For Bean-managed TX (session beans only), initialize method Tx attrs
            // to TX_BEAN_MANAGED. For other cases, initialize to TX_REQUIRED.
            // FIX_ME(RRS): I made this TX_REQUIRED to conform to the way it was in 4.0.1, but
            // it seems to me that this should perhaps be TX_NOT_SUPPORTED?
            // ejbTimeout must default to REQUIRES_NEW since REQUIRED is not
            // allowed per the EJB Specification. LI2281.11
            if (bmd.usesBeanManagedTx)
                transactionAttrs[i] = TransactionAttribute.TX_BEAN_MANAGED;

            // LIDB441.5 - For Bean-managed AS (session beans only), initialize method AS attrs
            // to AS_BEAN_MANAGED. For other cases, initialize to AS_UNKNOWN.
            activitySessionAttrs[i] = (bmd.usesBeanManagedAS) ? ActivitySessionAttribute.AS_BEAN_MANAGED : ActivitySessionAttribute.AS_UNKNOWN;
        }

        int metaMethodElementKind = methodInterface.getValue();

        // Only get and check method-level Tx attributes if container is managing Tx
        if (!bmd.usesBeanManagedTx) {
            // Process all Transaction Attributes from WCCM. If there is no transactionList,
            // assume this is an annotations only configuration scenario.
            if (transactionList != null) {
                MethodAttribUtils.getXMLCMTransactions(transactionAttrs, metaMethodElementKind, methodNames, methodParamTypes, transactionList, bmd); // PK93643
            }

            // Process all Transaction Attributes from Java Annotations,
            // and set defaults for all methods not explicitly configured.
            MethodAttribUtils.getAnnotationCMTransactions(transactionAttrs, metaMethodElementKind, ejbMethods, bmd);

            // Per section 11.4.1 of EJB 1.1 spec:
            // - for EntityBeans: getEJBHome, getHandle, getPrimaryKey and isIdentical must be TX_NOT_SUPPORTED
            // - for SessionBeans: all EJBObject interface methods must be TX_NOT_SUPPORTED
            if (isEntity) {
                MethodAttribUtils.checkTxAttrs(transactionAttrs, methodNames, methodSignatures, // PQ63130
                                               entityNoTxAttrMethods, entityNoTxAttrMethodSignatures, // PQ63130
                                               TransactionAttribute.TX_NOT_SUPPORTED);
            } else if (addRemove) {
                // For Session beans, the only EJBObject or EJBLocalObject method
                // that could be in the list is the 'remove' method added to the
                // end. If it is present, force it to NOT_SUPPORTED.
                // Note that there may be another 'remove' method in the list from
                // the business interfaces.... but it is allowed for that one to
                // have a different transaction attribute. d405948
                transactionAttrs[numExplicitMethods] = TransactionAttribute.TX_NOT_SUPPORTED;
            }

            if (methodInterface == MethodInterface.TIMED_OBJECT) {
                // Per EJB Specification, ejbTimeout must be TX_REQUIRES_NEW or
                // TX_NOT_SUPPORTED. However, internally, TX_REQUIRES_NEW will be
                // implemented as TX_REQUIRED so that the EJB method shares the
                // global transaction begun by scheduler QOS_ONLYONCE. LI2281.11
                // If timerQOSAtLeastOnceForRequired is specified, TX_REQUIRED is
                // implemented as TX_REQUIRES_NEW, which uses scheduler
                // QOS_ATLEASTONCE and then begins a new global transaction for the
                // EJB method as expected. RTC116312
                for (int i = 0; i < numMethods; i++) {
                    TransactionAttribute txAttr = transactionAttrs[i];
                    if (txAttr == TransactionAttribute.TX_REQUIRES_NEW) {
                        transactionAttrs[i] = TransactionAttribute.TX_REQUIRED;
                    } else if (txAttr == TransactionAttribute.TX_REQUIRED && ContainerProperties.TimerQOSAtLeastOnceForRequired) {
                        if (isTraceOn && tc.isDebugEnabled())
                            Tr.debug(tc, "updating " + ejbMethods[i] + " from TX_REQUIRED to TX_REQUIRES_NEW for QOS_ATLEASTONCE");
                        transactionAttrs[i] = TransactionAttribute.TX_REQUIRES_NEW;
                    }
                }
            } else if (methodInterface == MethodInterface.LIFECYCLE_INTERCEPTOR && bmd.type == InternalConstants.TYPE_SINGLETON_SESSION) {
                // F743-1751 - Like timer methods, lifecycle interceptor methods
                // translate REQUIRED into REQUIRES_NEW for singleton.
                for (int i = 0; i < numMethods; i++) {
                    if (transactionAttrs[i] == TransactionAttribute.TX_REQUIRED) {
                        transactionAttrs[i] = TransactionAttribute.TX_REQUIRES_NEW;
                    }
                }
            }
        } else {
            // Bean-managed TX: only validate that no conflicting CMT metadata exists.
            if (bmd.wccm.enterpriseBean != null) {
                MethodAttribUtils.chkBMTFromXML(transactionList, bmd.wccm.enterpriseBean, bmd.j2eeName); // d1414634
            }
            // no need to check annotations if all metadata came from xml.
            if (!bmd.metadataComplete) {
                MethodAttribUtils.chkBMTFromAnnotations(ejbMethods, bmd.j2eeName); // d395828
            }
        }

        // Only get method-level Activity Session attributes if container is managing AS
        if (!bmd.usesBeanManagedAS) {
            MethodAttribUtils.getActivitySessions(activitySessionAttrs, metaMethodElementKind, methodNames, methodParamTypes, activitySessionList, bmd.enterpriseBeanName, bmd.usesBeanManagedAS); // LIDB441.5
        } // if !usesBeanManagedAS
        else {
            if (bmd.wccm.enterpriseBean != null) {
                MethodAttribUtils.chkBMASFromXML(activitySessionList, bmd.wccm.enterpriseBean, bmd.j2eeName); // d141634
            }
        }

        if (isTraceOn && tc.isEntryEnabled())
            Tr.exit(tc, "initializeBeanMethodTransactionAttributes: " + methodInterface);
    }
}
public class MasterProtocol {

    /**
     * Loop over the candidate hosts until an active master connection is found.
     *
     * @param listener     current failover listener; receives the found master and
     *                     blacklist updates
     * @param globalInfo   server global variables information
     * @param addresses    list of HostAddress to loop over
     * @param searchFilter search parameter; when not a failover loop, the host list
     *                     is replenished and retried up to the listener's retry limit
     * @throws SQLException if no active master connection could be established
     */
    public static void loop(Listener listener, final GlobalStateInfo globalInfo, final List<HostAddress> addresses, SearchFilter searchFilter) throws SQLException {
        MasterProtocol protocol;
        ArrayDeque<HostAddress> loopAddresses = new ArrayDeque<>(addresses);
        if (loopAddresses.isEmpty()) {
            // No candidates supplied: seed the queue from the listener's URL parser.
            resetHostList(listener, loopAddresses);
        }
        int maxConnectionTry = listener.getRetriesAllDown();
        boolean firstLoop = true;
        SQLException lastQueryException = null;
        // Keep trying while hosts remain, or (outside a failover loop) while retries remain.
        while (!loopAddresses.isEmpty() || (!searchFilter.isFailoverLoop() && maxConnectionTry > 0)) {
            protocol = getNewProtocol(listener.getProxy(), globalInfo, listener.getUrlParser());
            if (listener.isExplicitClosed()) {
                return;
            }
            maxConnectionTry--;
            try {
                HostAddress host = loopAddresses.pollFirst();
                if (host == null) {
                    // Queue exhausted mid-iteration: refill with all configured hosts.
                    loopAddresses.addAll(listener.getUrlParser().getHostAddresses());
                    host = loopAddresses.pollFirst();
                }
                protocol.setHostAddress(host);
                protocol.connect();
                if (listener.isExplicitClosed()) {
                    protocol.close();
                    return;
                }
                // Success: clear this host from the blacklist and hand over the protocol.
                listener.removeFromBlacklist(protocol.getHostAddress());
                listener.foundActiveMaster(protocol);
                return;
            } catch (SQLException e) {
                listener.addToBlacklist(protocol.getHostAddress());
                lastQueryException = e;
            }
            // if server has try to connect to all host, and master still fail
            // add all servers back to continue looping until maxConnectionTry is reached
            if (loopAddresses.isEmpty() && !searchFilter.isFailoverLoop() && maxConnectionTry > 0) {
                resetHostList(listener, loopAddresses);
                if (firstLoop) {
                    firstLoop = false;
                } else {
                    try {
                        // wait 250ms before looping through all connection another time
                        Thread.sleep(250);
                    } catch (InterruptedException interrupted) {
                        // interrupted, continue
                    }
                }
            }
        }
        if (lastQueryException != null) {
            // Preserve the SQL state and error code of the last underlying failure.
            throw new SQLException("No active connection found for master : " + lastQueryException.getMessage(), lastQueryException.getSQLState(), lastQueryException.getErrorCode(), lastQueryException);
        }
        throw new SQLException("No active connection found for master");
    }
}
public class X509CertImpl { /** * Gets the publickey from this certificate . * @ return the publickey . */ public PublicKey getPublicKey ( ) { } }
if ( info == null ) return null ; try { PublicKey key = ( PublicKey ) info . get ( CertificateX509Key . NAME + DOT + CertificateX509Key . KEY ) ; return key ; } catch ( Exception e ) { return null ; }
public class ComponentCollision { /** * Add point and adjacent points depending of the collidable max collision size . * @ param minX The min horizontal location . * @ param minY The min vertical location . * @ param maxX The min horizontal location . * @ param maxY The min vertical location . * @ param collidable The collidable reference . */ private void addPoints ( int minX , int minY , int maxX , int maxY , Collidable collidable ) { } }
addPoint ( new Point ( minX , minY ) , collidable ) ; if ( minX != maxX && minY == maxY ) { addPoint ( new Point ( maxX , minY ) , collidable ) ; } else if ( minX == maxX && minY != maxY ) { addPoint ( new Point ( minX , maxY ) , collidable ) ; } else if ( minX != maxX ) { addPoint ( new Point ( minX , maxY ) , collidable ) ; addPoint ( new Point ( maxX , minY ) , collidable ) ; addPoint ( new Point ( maxX , maxY ) , collidable ) ; }
public class TextToSpeech { /** * Get a voice . * Gets information about the specified voice . The information includes the name , language , gender , and other details * about the voice . Specify a customization ID to obtain information for that custom voice model of the specified * voice . To list information about all available voices , use the * * List voices * * method . * * * See also : * * [ Listing a specific voice ] ( https : / / cloud . ibm . com / docs / services / text - to - speech / voices . html # listVoice ) . * @ param getVoiceOptions the { @ link GetVoiceOptions } containing the options for the call * @ return a { @ link ServiceCall } with a response type of { @ link Voice } */ public ServiceCall < Voice > getVoice ( GetVoiceOptions getVoiceOptions ) { } }
Validator . notNull ( getVoiceOptions , "getVoiceOptions cannot be null" ) ; String [ ] pathSegments = { "v1/voices" } ; String [ ] pathParameters = { getVoiceOptions . voice ( ) } ; RequestBuilder builder = RequestBuilder . get ( RequestBuilder . constructHttpUrl ( getEndPoint ( ) , pathSegments , pathParameters ) ) ; Map < String , String > sdkHeaders = SdkCommon . getSdkHeaders ( "text_to_speech" , "v1" , "getVoice" ) ; for ( Entry < String , String > header : sdkHeaders . entrySet ( ) ) { builder . header ( header . getKey ( ) , header . getValue ( ) ) ; } builder . header ( "Accept" , "application/json" ) ; if ( getVoiceOptions . customizationId ( ) != null ) { builder . query ( "customization_id" , getVoiceOptions . customizationId ( ) ) ; } return createServiceCall ( builder . build ( ) , ResponseConverterUtils . getObject ( Voice . class ) ) ;
public class GeometryColumnsSfSqlDao { /** * { @ inheritDoc } */ @ Override public GeometryColumnsSfSql queryForId ( TableColumnKey key ) throws SQLException { } }
GeometryColumnsSfSql geometryColumns = null ; if ( key != null ) { Map < String , Object > fieldValues = new HashMap < String , Object > ( ) ; fieldValues . put ( GeometryColumnsSfSql . COLUMN_F_TABLE_NAME , key . getTableName ( ) ) ; fieldValues . put ( GeometryColumnsSfSql . COLUMN_F_GEOMETRY_COLUMN , key . getColumnName ( ) ) ; List < GeometryColumnsSfSql > results = queryForFieldValues ( fieldValues ) ; if ( ! results . isEmpty ( ) ) { if ( results . size ( ) > 1 ) { throw new SQLException ( "More than one " + GeometryColumnsSfSql . class . getSimpleName ( ) + " returned for key. Table Name: " + key . getTableName ( ) + ", Column Name: " + key . getColumnName ( ) ) ; } geometryColumns = results . get ( 0 ) ; } } return geometryColumns ;
public class SingleOutputStreamOperator { /** * Adds a type information hint about the return type of this operator . This method * can be used in cases where Flink cannot determine automatically what the produced * type of a function is . That can be the case if the function uses generic type variables * in the return type that cannot be inferred from the input type . * < p > Classes can be used as type hints for non - generic types ( classes without generic parameters ) , * but not for generic types like for example Tuples . For those generic types , please * use the { @ link # returns ( TypeHint ) } method . * @ param typeClass The class of the returned data type . * @ return This operator with the type information corresponding to the given type class . */ public SingleOutputStreamOperator < T > returns ( Class < T > typeClass ) { } }
requireNonNull ( typeClass , "type class must not be null." ) ; try { return returns ( TypeInformation . of ( typeClass ) ) ; } catch ( InvalidTypesException e ) { throw new InvalidTypesException ( "Cannot infer the type information from the class alone." + "This is most likely because the class represents a generic type. In that case," + "please use the 'returns(TypeHint)' method instead." ) ; }
public class ServletTimerImpl {
    /**
     * Method that actually fires the timer: sets up the SIP application context,
     * invokes the registered {@code TimerListener}'s {@code timeout()} callback
     * (unless the timer was cancelled), then either schedules the next execution
     * (repeating timers) or cancels itself (one-shot timers), always restoring the
     * context/class-loader state on the way out.
     */
    public void run ( ) {
        final MobicentsSipApplicationSession sipApplicationSession = getApplicationSession ( ) ;
        SipContext sipContext = sipApplicationSession . getSipContext ( ) ;
        if ( logger . isDebugEnabled ( ) ) {
            logger . debug ( "running Servlet Timer " + id + " for sip application session " + sipApplicationSession ) ;
        }
        boolean batchStarted = false ;
        // Remember the caller's class loader so it can be restored after the callback.
        ClassLoader oldClassLoader = Thread . currentThread ( ) . getContextClassLoader ( ) ;
        try {
            // Enter the SIP context/app/HA batch in order; the finally blocks below
            // undo these in a fixed sequence — do not reorder these calls.
            sipContext . enterSipContext ( ) ;
            sipContext . enterSipApp ( sipApplicationSession , null , false , true ) ;
            batchStarted = sipContext . enterSipAppHa ( true ) ;
            if ( isCanceled == false ) {
                // Deliver the timeout to the application listener.
                listener . timeout ( this ) ;
            } else {
                logger . debug ( "running Servlet Timer " + id + " for sip application session " + sipApplicationSession + " is cancelled, so we skip its timerListener's timeout() method call!" ) ;
            }
        } catch ( Throwable t ) {
            // The listener is application code: never let its failures escape the timer thread.
            logger . error ( "An unexpected exception happened in the timer callback!" , t ) ;
        } finally {
            try {
                sipContext . exitSipContext ( oldClassLoader ) ;
                if ( isRepeatingTimer ) {
                    // Repeating timer: compute when it should fire next.
                    estimateNextExecution ( ) ;
                } else {
                    // This non-repeating timer is now "ready" and should not remain in the
                    // list of active timers. The application may already have canceled() it,
                    // so the return value of cancel() is intentionally ignored.
                    cancel ( ) ;
                }
                if ( logger . isDebugEnabled ( ) ) {
                    logger . debug ( "Servlet Timer " + id + " for sip application session " + sipApplicationSession + " ended" ) ;
                }
            } finally {
                // Always leave the HA batch and the SIP app, even if the inner cleanup failed.
                sipContext . exitSipAppHa ( null , null , batchStarted ) ;
                sipContext . exitSipApp ( sipApplicationSession , null ) ;
            }
        }
    }
}
public class JobTargetExecutionsInner { /** * Lists the target executions of a job step execution . * @ param resourceGroupName The name of the resource group that contains the resource . You can obtain this value from the Azure Resource Manager API or the portal . * @ param serverName The name of the server . * @ param jobAgentName The name of the job agent . * @ param jobName The name of the job to get . * @ param jobExecutionId The id of the job execution * @ param stepName The name of the step . * @ throws IllegalArgumentException thrown if parameters fail the validation * @ throws CloudException thrown if the request is rejected by server * @ throws RuntimeException all other wrapped checked exceptions if the request fails to be sent * @ return the PagedList & lt ; JobExecutionInner & gt ; object if successful . */ public PagedList < JobExecutionInner > listByStep ( final String resourceGroupName , final String serverName , final String jobAgentName , final String jobName , final UUID jobExecutionId , final String stepName ) { } }
ServiceResponse < Page < JobExecutionInner > > response = listByStepSinglePageAsync ( resourceGroupName , serverName , jobAgentName , jobName , jobExecutionId , stepName ) . toBlocking ( ) . single ( ) ; return new PagedList < JobExecutionInner > ( response . body ( ) ) { @ Override public Page < JobExecutionInner > nextPage ( String nextPageLink ) { return listByStepNextSinglePageAsync ( nextPageLink ) . toBlocking ( ) . single ( ) . body ( ) ; } } ;
public class SlowQueryListener { /** * Calculate a key for given { @ link ExecutionInfo } . * < p > This key is passed to the slow query check { @ link Runnable } as well as for removal in { @ link # afterQuery ( ExecutionInfo , List ) } . * < p > Default implementation uses { @ link System # identityHashCode ( Object ) } . This does NOT guarantee 100 % of uniqueness ; however , since * the current usage of the key is short lived and good enough for this use case . * < p > Subclass can override this method to provide different implementation to uniquely represent { @ link ExecutionInfo } . * @ param executionInfo execution info * @ return key */ protected String getExecutionInfoKey ( ExecutionInfo executionInfo ) { } }
int exeInfoKey = System . identityHashCode ( executionInfo ) ; return String . valueOf ( exeInfoKey ) ;
public class AbstractMappableValidator { /** * Disposes all data providers and rules that are mapped to each other . < br > Note that some data providers may have * been * disposed already in the other disposal methods . */ private void disposeDataProvidersAndRules ( ) { } }
for ( final Map . Entry < DP , List < R > > entry : dataProvidersToRules . entrySet ( ) ) { // Dispose data provider final DP dataProvider = entry . getKey ( ) ; if ( dataProvider instanceof Disposable ) { ( ( Disposable ) dataProvider ) . dispose ( ) ; } // Dispose rules final List < R > rules = entry . getValue ( ) ; if ( rules != null ) { for ( final R rule : rules ) { if ( rule instanceof Disposable ) { ( ( Disposable ) rule ) . dispose ( ) ; } } } } // Clears all triggers dataProvidersToRules . clear ( ) ;
public class JSONArray {
    /**
     * Returns a List or a Set taking generics into account.<br/>
     * Contributed by [Matt Small @ WaveMaker].
     *
     * The concrete collection type comes from {@code jsonConfig.getCollectionType()}:
     * {@code List}/{@code Set} interfaces map to {@code ArrayList}/{@code HashSet},
     * any other interface is rejected, and concrete classes are instantiated
     * reflectively. Elements are converted recursively: nested arrays become nested
     * collections, scalars are optionally morphed to the configured root class, and
     * anything else is treated as a {@code JSONObject} and converted to a bean.
     */
    public static Collection toCollection ( JSONArray jsonArray , JsonConfig jsonConfig ) {
        Collection collection = null ;
        Class collectionType = jsonConfig . getCollectionType ( ) ;
        if ( collectionType . isInterface ( ) ) {
            // Only the two standard collection interfaces are supported directly.
            if ( collectionType . equals ( List . class ) ) {
                collection = new ArrayList ( ) ;
            } else if ( collectionType . equals ( Set . class ) ) {
                collection = new HashSet ( ) ;
            } else {
                throw new JSONException ( "unknown interface: " + collectionType ) ;
            }
        } else {
            // Concrete class: instantiate reflectively (requires a public no-arg constructor).
            try {
                collection = ( Collection ) collectionType . newInstance ( ) ;
            } catch ( InstantiationException e ) {
                throw new JSONException ( e ) ;
            } catch ( IllegalAccessException e ) {
                throw new JSONException ( e ) ;
            }
        }
        Class objectClass = jsonConfig . getRootClass ( ) ;
        Map classMap = jsonConfig . getClassMap ( ) ;
        int size = jsonArray . size ( ) ;
        for ( int i = 0 ; i < size ; i ++ ) {
            Object value = jsonArray . get ( i ) ;
            if ( JSONUtils . isNull ( value ) ) {
                // JSON null becomes a Java null element.
                collection . add ( null ) ;
            } else {
                Class type = value . getClass ( ) ;
                if ( JSONArray . class . isAssignableFrom ( value . getClass ( ) ) ) {
                    // Nested array: recurse with the same configuration.
                    collection . add ( toCollection ( ( JSONArray ) value , jsonConfig ) ) ;
                } else if ( String . class . isAssignableFrom ( type ) || Boolean . class . isAssignableFrom ( type ) || JSONUtils . isNumber ( type ) || Character . class . isAssignableFrom ( type ) || JSONFunction . class . isAssignableFrom ( type ) ) {
                    // Scalar value: morph it to the configured root class when one is
                    // set and the value is not already assignable to it.
                    if ( objectClass != null && ! objectClass . isAssignableFrom ( type ) ) {
                        value = JSONUtils . getMorpherRegistry ( ) . morph ( objectClass , value ) ;
                    }
                    collection . add ( value ) ;
                } else {
                    // Anything else is treated as a JSONObject and converted to a bean.
                    // NOTE(review): a non-scalar, non-array element that is not a JSONObject
                    // would raise a ClassCastException here — presumably JSONArray elements
                    // can only be scalars, JSONArrays or JSONObjects; confirm upstream.
                    if ( objectClass != null ) {
                        // Copy the config so root class / class map changes stay local.
                        JsonConfig jsc = jsonConfig . copy ( ) ;
                        jsc . setRootClass ( objectClass ) ;
                        jsc . setClassMap ( classMap ) ;
                        collection . add ( JSONObject . toBean ( ( JSONObject ) value , jsc ) ) ;
                    } else {
                        collection . add ( JSONObject . toBean ( ( JSONObject ) value ) ) ;
                    }
                }
            }
        }
        return collection ;
    }
}