signature
stringlengths
43
39.1k
implementation
stringlengths
0
450k
public class AbstractOptionsForSelect { /** * Set the given retry policy * @ throws NullPointerException if value is null */ public T withRetryPolicy ( RetryPolicy retryPolicy ) { } }
getOptions ( ) . setRetryPolicy ( Optional . of ( retryPolicy ) ) ; return getThis ( ) ;
public class ReflectCache { /** * 从缓存里获取方法 * @ param serviceName 服务名 ( 非接口名 ) * @ param methodName 方法名 * @ param methodSigs 方法描述 * @ return 方法 */ public static Method getOverloadMethodCache ( String serviceName , String methodName , String [ ] methodSigs ) { } }
ConcurrentHashMap < String , Method > methods = OVERLOAD_METHOD_CACHE . get ( serviceName ) ; if ( methods == null ) { return null ; } StringBuilder mSigs = new StringBuilder ( 128 ) ; mSigs . append ( methodName ) ; for ( String methodSign : methodSigs ) { mSigs . append ( methodSign ) ; } return methods . get ( mSigs . toString ( ) ) ;
public class CertificatesImpl { /** * Gets information about the specified certificate . * @ param thumbprintAlgorithm The algorithm used to derive the thumbprint parameter . This must be sha1. * @ param thumbprint The thumbprint of the certificate to get . * @ param serviceCallback the async ServiceCallback to handle successful and failed responses . * @ throws IllegalArgumentException thrown if parameters fail the validation * @ return the { @ link ServiceFuture } object */ public ServiceFuture < Certificate > getAsync ( String thumbprintAlgorithm , String thumbprint , final ServiceCallback < Certificate > serviceCallback ) { } }
return ServiceFuture . fromHeaderResponse ( getWithServiceResponseAsync ( thumbprintAlgorithm , thumbprint ) , serviceCallback ) ;
public class JsonWriter { /** * Write the int as object key . * @ param key The int key . * @ return The JSON Writer . */ public JsonWriter key ( int key ) { } }
startKey ( ) ; writer . write ( '\"' ) ; writer . print ( key ) ; writer . write ( '\"' ) ; writer . write ( ':' ) ; return this ;
public class LeaderElector { /** * Start participation in a leader election . * @ return { @ link Future } which is completed after the leader election has been performed * @ throws KeeperException If there is an error creating election nodes * @ throws InterruptedException If this thread was interrupted */ public Future < ? > start ( ) throws KeeperException , InterruptedException { } }
node = createParticipantNode ( zk , dir , prefix , data ) ; return es . submit ( electionEventHandler ) ;
public class FBContrib {
    /**
     * Shows a simple installation help dialog and exits.
     *
     * @param args standard command line args (unused)
     */
    public static void main(final String[] args) {
        final String message = "To use fb-contrib, copy this jar file into your local SpotBugs plugin directory, and use SpotBugs as usual.\n\nfb-contrib is a trademark of MeBigFatGuy.com";
        final String title = "fb-contrib: copyright 2005-2019";
        JOptionPane.showMessageDialog(null, message, title, JOptionPane.INFORMATION_MESSAGE);
        System.exit(0);
    }
}
public class ImageLoader { /** * Adds display image task to execution pool . Image will be set to ImageAware when it ' s turn . < br / > * Default { @ linkplain DisplayImageOptions display image options } from { @ linkplain ImageLoaderConfiguration * configuration } will be used . < br / > * < b > NOTE : < / b > { @ link # init ( ImageLoaderConfiguration ) } method must be called before this method call * @ param uri Image URI ( i . e . " http : / / site . com / image . png " , " file : / / / mnt / sdcard / image . png " ) * @ param imageAware { @ linkplain com . nostra13 . universalimageloader . core . imageaware . ImageAware Image aware view } * which should display image * @ throws IllegalStateException if { @ link # init ( ImageLoaderConfiguration ) } method wasn ' t called before * @ throws IllegalArgumentException if passed < b > imageAware < / b > is null */ public void displayImage ( String uri , ImageAware imageAware ) { } }
displayImage ( uri , imageAware , null , null , null ) ;
public class CreateVpcPeeringConnectionRequestMarshaller { /** * Marshall the given parameter object . */ public void marshall ( CreateVpcPeeringConnectionRequest createVpcPeeringConnectionRequest , ProtocolMarshaller protocolMarshaller ) { } }
if ( createVpcPeeringConnectionRequest == null ) { throw new SdkClientException ( "Invalid argument passed to marshall(...)" ) ; } try { protocolMarshaller . marshall ( createVpcPeeringConnectionRequest . getFleetId ( ) , FLEETID_BINDING ) ; protocolMarshaller . marshall ( createVpcPeeringConnectionRequest . getPeerVpcAwsAccountId ( ) , PEERVPCAWSACCOUNTID_BINDING ) ; protocolMarshaller . marshall ( createVpcPeeringConnectionRequest . getPeerVpcId ( ) , PEERVPCID_BINDING ) ; } catch ( Exception e ) { throw new SdkClientException ( "Unable to marshall request to JSON: " + e . getMessage ( ) , e ) ; }
public class InternalUtils { /** * Set the forwarded form . This overrides the auto - generated form created by processActionForm * and populated by processPopulate ( in PageFlowRequestProcessor ) . */ public static void setForwardedFormBean ( ServletRequest request , ActionForm form ) { } }
if ( form == null ) { request . removeAttribute ( FORWARDED_FORMBEAN_ATTR ) ; } else { request . setAttribute ( FORWARDED_FORMBEAN_ATTR , form ) ; }
public class HessianSchurComplement_Base {
    /**
     * Computes the gradient using the Schur complement, from a Jacobian that is
     * split into a left and a right block: g = J'*r = [L,R]'*r.
     *
     * @param jacLeft   (Input) Left side of Jacobian
     * @param jacRight  (Input) Right side of Jacobian
     * @param residuals (Input) Residuals
     * @param gradient  (Output) Gradient; top rows come from the left block, bottom rows from the right
     */
    @Override
    public void computeGradient(S jacLeft, S jacRight, DMatrixRMaj residuals, DMatrixRMaj gradient) {
        // Find the gradient using the two matrices for Jacobian
        // g = J'*r = [L,R]'*r
        // Size the work vectors to the column counts of each Jacobian block.
        x1.reshape(jacLeft.getNumCols(), 1);
        x2.reshape(jacRight.getNumCols(), 1);
        // x1 = L'*r, x2 = R'*r
        multTransA(jacLeft, residuals, x1);
        multTransA(jacRight, residuals, x2);
        // Stack the two partial gradients into the output vector.
        CommonOps_DDRM.insert(x1, gradient, 0, 0);
        CommonOps_DDRM.insert(x2, gradient, x1.numRows, 0);
    }
}
public class ChangeAvailabilityResponseHandler { /** * Handles the response when the change of availability has been accepted or scheduled , e . g . not rejected . * @ param chargingStationId The charging station identifier . * @ param domainService The domain service . * @ param addOnIdentity The AddOn identity . */ private void handleAcceptedOrScheduledChangeAvailabilityResponse ( ChargingStationId chargingStationId , DomainService domainService , AddOnIdentity addOnIdentity ) { } }
if ( Changeavailability . Type . INOPERATIVE . equals ( availabilityType ) ) { if ( evseId . getNumberedId ( ) == 0 ) { domainService . changeChargingStationAvailabilityToInoperative ( chargingStationId , getCorrelationToken ( ) , addOnIdentity ) ; } else { domainService . changeComponentAvailabilityToInoperative ( chargingStationId , evseId , ChargingStationComponent . EVSE , getCorrelationToken ( ) , addOnIdentity ) ; } } else { if ( evseId . getNumberedId ( ) == 0 ) { domainService . changeChargingStationAvailabilityToOperative ( chargingStationId , getCorrelationToken ( ) , addOnIdentity ) ; } else { domainService . changeComponentAvailabilityToOperative ( chargingStationId , evseId , ChargingStationComponent . EVSE , getCorrelationToken ( ) , addOnIdentity ) ; } }
public class KillSessionsArgs { /** * Returns true if field corresponding to fieldID is set ( has been assigned a value ) and false otherwise */ public boolean isSet ( _Fields field ) { } }
if ( field == null ) { throw new IllegalArgumentException ( ) ; } switch ( field ) { case SESSION_IDS : return isSetSessionIds ( ) ; case WHO : return isSetWho ( ) ; } throw new IllegalStateException ( ) ;
public class MapperHelper {
    /**
     * Determines whether the current interface method should be intercepted.
     *
     * @param msId the MappedStatement id
     * @return the matching MapperTemplate, or null if the method should not be intercepted
     */
    public MapperTemplate isMapperMethod(String msId) {
        MapperTemplate mapperTemplate = getMapperTemplateByMsId(msId);
        if (mapperTemplate == null) {
            // Functionality auto-registered through the @RegisterMapper annotation.
            // NOTE(review): presumably hasRegisterMapper registers the mapper as a
            // side effect, which is why the template is re-fetched afterwards — confirm.
            try {
                Class<?> mapperClass = getMapperClass(msId);
                if (mapperClass.isInterface() && hasRegisterMapper(mapperClass)) {
                    mapperTemplate = getMapperTemplateByMsId(msId);
                }
            } catch (Exception e) {
                // Deliberately ignored: failure to resolve the class simply means
                // the method is not intercepted (best-effort lookup).
            }
        }
        return mapperTemplate;
    }
}
public class IoTDiscoveryManager { /** * Thing Claiming - XEP - 0347 § 3.9 */ public IoTClaimed claimThing ( Collection < Tag > metaTags ) throws NoResponseException , XMPPErrorException , NotConnectedException , InterruptedException { } }
return claimThing ( metaTags , true ) ;
public class CachedCacheManager { /** * Bean may , but not necessarily , have deep hierarchies of references * to other beans . Since a cache store beans per schema we must * dig out this hierarchy and flatten it out , */ private Set < Bean > flattenReferences ( Bean bean ) { } }
Set < Bean > beans = new HashSet < > ( ) ; for ( String referenceName : bean . getReferenceNames ( ) ) { List < BeanId > ids = bean . getReference ( referenceName ) ; for ( BeanId id : ids ) { if ( id . getBean ( ) == null ) { continue ; } beans . addAll ( flattenReferences ( id . getBean ( ) ) ) ; } } beans . add ( bean ) ; return beans ;
public class JavaClasspathParser { /** * Returns the kind of a < code > PackageFragmentRoot < / code > from its < code > String < / code > form . * @ param kindStr * - string to test * @ return the integer identifier of the type of the specified string : CPE _ PROJECT , CPE _ VARIABLE , CPE _ CONTAINER , etc . */ @ SuppressWarnings ( "checkstyle:equalsavoidnull" ) private static int kindFromString ( String kindStr ) { } }
if ( kindStr . equalsIgnoreCase ( "prj" ) ) { // $ NON - NLS - 1 $ return IClasspathEntry . CPE_PROJECT ; } if ( kindStr . equalsIgnoreCase ( "var" ) ) { // $ NON - NLS - 1 $ return IClasspathEntry . CPE_VARIABLE ; } if ( kindStr . equalsIgnoreCase ( "con" ) ) { // $ NON - NLS - 1 $ return IClasspathEntry . CPE_CONTAINER ; } if ( kindStr . equalsIgnoreCase ( "src" ) ) { // $ NON - NLS - 1 $ return IClasspathEntry . CPE_SOURCE ; } if ( kindStr . equalsIgnoreCase ( "lib" ) ) { // $ NON - NLS - 1 $ return IClasspathEntry . CPE_LIBRARY ; } if ( kindStr . equalsIgnoreCase ( "output" ) ) { // $ NON - NLS - 1 $ return ClasspathEntry . K_OUTPUT ; } return - 1 ;
public class MultimapJoiner {
    /**
     * Returns a multimap joiner with the same behavior as this one, except automatically
     * substituting {@code nullText} for any provided null keys or values.
     * The returned joiner refuses further {@code useForNull}/{@code skipNulls}
     * configuration, since a null policy has already been chosen.
     */
    public MultimapJoiner useForNull(final String nullText) {
        return new MultimapJoiner(entryJoiner.useForNull(nullText), separator, keyValueSeparator) {
            @Override
            protected CharSequence toString(Object part) {
                // Substitute the placeholder for nulls; otherwise delegate to the
                // enclosing joiner's conversion.
                return (part == null) ? nullText : MultimapJoiner.this.toString(part);
            }

            @Override
            public MultimapJoiner useForNull(String nullText) {
                // A null policy is already in place; reconfiguring is an error.
                throw new UnsupportedOperationException("already specified useForNull");
            }

            @Override
            public MultimapJoiner skipNulls() {
                // skipNulls conflicts with the substitution policy chosen above.
                throw new UnsupportedOperationException("already specified useForNull");
            }
        };
    }
}
public class JQueryJsAppenderBehavior { /** * Factory method to create the rendered statement . * @ param component * the component * @ return the char sequence */ public CharSequence newRenderedStatement ( final Component component ) { } }
final JsStatement statement = new JsQuery ( component ) . $ ( ) . chain ( statementLabel , JsUtils . quotes ( statementArgs ) ) ; // $ ( ' # component ' ) . statementLabel ( ' statementArgs ' ) ; return statement . render ( ) ;
public class TypeAnnotationPosition { /** * Create a { @ code TypeAnnotationPosition } for a constructor reference . * @ param location The type path . * @ param onLambda The lambda for this constructor reference . * @ param pos The position from the associated tree node . */ public static TypeAnnotationPosition constructorRef ( final List < TypePathEntry > location , final JCLambda onLambda , final int pos ) { } }
return new TypeAnnotationPosition ( TargetType . CONSTRUCTOR_REFERENCE , pos , Integer . MIN_VALUE , onLambda , Integer . MIN_VALUE , Integer . MIN_VALUE , location ) ;
public class OcrClient { /** * Gets the general recognition properties of specific image resource . * The caller < i > must < / i > authenticate with a valid BCE Access Key / Private Key pair . * @ param image The image data which needs to be base64 * @ return The general recognition properties of the image resource */ public GeneralRecognitionResponse generalRecognition ( String image ) { } }
GeneralRecognitionRequest request = new GeneralRecognitionRequest ( ) . withImage ( image ) ; return generalRecognition ( request ) ;
public class AdsServiceClientFactory { /** * Creates the proxy for the { @ link AdsServiceClient } . * @ param < T > the service type * @ param adsServiceClient the client to proxy * @ return the proxy */ < T > T createProxy ( Class < T > interfaceClass , C adsServiceClient ) { } }
Set < Class < ? > > interfaces = Sets . newHashSet ( adsServiceClient . getClass ( ) . getInterfaces ( ) ) ; interfaces . add ( interfaceClass ) ; Object proxy = Proxy . newProxyInstance ( adsServiceClient . getSoapClient ( ) . getClass ( ) . getClassLoader ( ) , interfaces . toArray ( new Class [ ] { } ) , adsServiceClient ) ; return interfaceClass . cast ( proxy ) ;
public class CommerceNotificationTemplateLocalServiceBaseImpl { /** * Returns a range of all the commerce notification templates . * Useful when paginating results . Returns a maximum of < code > end - start < / code > instances . < code > start < / code > and < code > end < / code > are not primary keys , they are indexes in the result set . Thus , < code > 0 < / code > refers to the first result in the set . Setting both < code > start < / code > and < code > end < / code > to { @ link com . liferay . portal . kernel . dao . orm . QueryUtil # ALL _ POS } will return the full result set . If < code > orderByComparator < / code > is specified , then the query will include the given ORDER BY logic . If < code > orderByComparator < / code > is absent and pagination is required ( < code > start < / code > and < code > end < / code > are not { @ link com . liferay . portal . kernel . dao . orm . QueryUtil # ALL _ POS } ) , then the query will include the default ORDER BY logic from { @ link com . liferay . commerce . notification . model . impl . CommerceNotificationTemplateModelImpl } . If both < code > orderByComparator < / code > and pagination are absent , for performance reasons , the query will not have an ORDER BY clause and the returned result set will be sorted on by the primary key in an ascending order . * @ param start the lower bound of the range of commerce notification templates * @ param end the upper bound of the range of commerce notification templates ( not inclusive ) * @ return the range of commerce notification templates */ @ Override public List < CommerceNotificationTemplate > getCommerceNotificationTemplates ( int start , int end ) { } }
return commerceNotificationTemplatePersistence . findAll ( start , end ) ;
public class ConjunctionImpl {
    /**
     * Adds a SimpleTest to the conjunction, searching for contradictions with the
     * tests already present. Tests with the same identifier are merged via combine().
     *
     * @param newTest the new SimpleTest to be added
     * @return true if the new test is compatible with the old ones (a false return
     *         means the conjunction will always be false because the test is always false)
     */
    public boolean and(SimpleTest newTest) {
        for (int i = 0; i < tmpSimpleTests.size(); i++) {
            SimpleTest cand = (SimpleTest) tmpSimpleTests.get(i);
            if (cand.getIdentifier().getName().equals(newTest.getIdentifier().getName())) {
                // Careful, may be operating in XPath selector domain, in which
                // case we need more stringent tests
                if (cand.getIdentifier().isExtended()) {
                    // NOTE(review): getStep() is compared with == — presumably it
                    // returns a primitive or interned value; confirm identity
                    // comparison is intended here.
                    if (cand.getIdentifier().getStep() == newTest.getIdentifier().getStep()) {
                        // Identifiers have same name and same location step
                        return cand.combine(newTest);
                    }
                } else
                    return cand.combine(newTest);
            }
        }
        // No matching identifier found: record the new test as an independent conjunct.
        tmpSimpleTests.add(newTest);
        alwaysTrue = false;
        return true;
    }
}
public class PrcItSpecEmbFlDel { /** * < p > Process entity request . < / p > * @ param pReqVars additional request scoped parameters * @ param pRequestData Request Data * @ param pEntity Entity to process * @ return Entity processed for farther process or null * @ throws Exception - an exception */ @ Override public final AItemSpecifics < T , ID > process ( final Map < String , Object > pReqVars , final AItemSpecifics < T , ID > pEntity , final IRequestData pRequestData ) throws Exception { } }
File fileToDel ; if ( pEntity . getStringValue2 ( ) != null ) { fileToDel = new File ( pEntity . getStringValue2 ( ) ) ; if ( fileToDel . exists ( ) && ! fileToDel . delete ( ) ) { throw new ExceptionWithCode ( ExceptionWithCode . SOMETHING_WRONG , "Can not delete file: " + fileToDel ) ; } } if ( pEntity . getStringValue3 ( ) != null ) { int idhHtml = pEntity . getStringValue1 ( ) . indexOf ( ".html" ) ; String urlWithoutHtml = pEntity . getStringValue1 ( ) . substring ( 0 , idhHtml ) ; for ( String lang : pEntity . getStringValue3 ( ) . split ( "," ) ) { String filePath = this . webAppPath + File . separator + urlWithoutHtml + "_" + lang + ".html" ; fileToDel = new File ( filePath ) ; if ( fileToDel . exists ( ) && ! fileToDel . delete ( ) ) { throw new ExceptionWithCode ( ExceptionWithCode . SOMETHING_WRONG , "Can not delete file: " + fileToDel ) ; } } } getSrvOrm ( ) . deleteEntity ( pReqVars , pEntity ) ; return null ;
public class StyleUtils { /** * Create new polygon options populated with the feature style * @ param featureStyle feature style * @ param density display density : { @ link android . util . DisplayMetrics # density } * @ return polygon options populated with the feature style */ public static PolygonOptions createPolygonOptions ( FeatureStyle featureStyle , float density ) { } }
PolygonOptions polygonOptions = new PolygonOptions ( ) ; setFeatureStyle ( polygonOptions , featureStyle , density ) ; return polygonOptions ;
public class ConnectionDAODefaultImpl {
    /**
     * Initializes a connection for the given device from host/port/name,
     * bypassing the database (device_is_dbase is set to false).
     * For TANGO-protocol URLs the exported IOR is resolved immediately.
     *
     * @throws DevFailed if URL construction or IOR resolution fails
     */
    public void init(final Connection connection, final String devname, final String host, final String port) throws DevFailed {
        // Build the full device URL first; subsequent fields derive from it.
        connection.url = new TangoUrl(buildUrlName(host, port, devname));
        connection.devname = connection.url.devname;
        connection.setDevice_is_dbase(false);
        // Check if connection is possible
        if (connection.url.protocol == TANGO) {
            connection.ior = get_exported_ior(connection);
        }
    }
}
public class HttpServerHandler {
    /**
     * Instruments an incoming request before it is handled.
     * <p>Creates a span under the deserialized propagated parent context; if no
     * parent context is present (or this is a public endpoint), the span is created
     * without a remote parent. The generated span is NOT set as current context —
     * the user controls when to enter its scope.
     *
     * @param carrier the entity that holds the HTTP information
     * @param request the request entity
     * @return the {@link HttpRequestContext} with stats and trace data for the request
     * @since 0.19
     */
    public HttpRequestContext handleStart(C carrier, Q request) {
        checkNotNull(carrier, "carrier");
        checkNotNull(request, "request");
        SpanBuilder spanBuilder = null;
        String spanName = getSpanName(request, extractor);
        // de-serialize the context
        SpanContext spanContext = null;
        try {
            spanContext = textFormat.extract(carrier, getter);
        } catch (SpanContextParseException e) {
            // TODO: Currently we cannot distinguish between context parse error and missing context.
            // Logging would be annoying so we just ignore this error and do not even log a message.
        }
        // Public endpoints never take a remote parent directly; the remote context
        // is attached as a link instead (below).
        if (spanContext == null || publicEndpoint) {
            spanBuilder = tracer.spanBuilder(spanName);
        } else {
            spanBuilder = tracer.spanBuilderWithRemoteParent(spanName, spanContext);
        }
        Span span = spanBuilder.setSpanKind(Kind.SERVER).startSpan();
        if (publicEndpoint && spanContext != null) {
            span.addLink(Link.fromSpanContext(spanContext, Type.PARENT_LINKED_SPAN));
        }
        // Only pay the attribute-extraction cost when events are being recorded.
        if (span.getOptions().contains(Options.RECORD_EVENTS)) {
            addSpanRequestAttributes(span, request, extractor);
        }
        return getNewContext(span, tagger.getCurrentTagContext());
    }
}
public class WarningsResult { /** * { @ inheritDoc } */ public String getDisplayName ( ) { } }
if ( group == null ) { return Messages . Warnings_ProjectAction_Name ( ) ; } else { return ParserRegistry . getParser ( group ) . getLinkName ( ) . toString ( ) ; }
public class CrystalBuilder {
    /**
     * Returns the list of unique interfaces that the given Structure has upon
     * generation of all crystal symmetry mates. An interface is defined as any pair of chains
     * that contact, i.e. for which there is at least a pair of atoms (one from each chain) within
     * the given cutoff distance.
     *
     * @param cutoff the distance cutoff for 2 chains to be considered in contact
     * @return the (possibly empty) list of unique interfaces
     */
    public StructureInterfaceList getUniqueInterfaces(double cutoff) {
        StructureInterfaceList set = new StructureInterfaceList();
        // certain structures in the PDB are not macromolecules (contain no polymeric chains at all), e.g. 1ao2
        // with the current mmCIF parsing, those will be empty since purely non-polymeric chains are removed
        // see commit e9562781f23da0ebf3547146a307d7edd5741090
        if (numPolyChainsAu == 0) {
            logger.warn("No chains present in the structure! No interfaces will be calculated");
            return set;
        }
        // pass the chainOrigNames map in NCS case so that StructureInterfaceList can deal with original to NCS chain names conversion
        if (chainOrigNames != null) {
            set.setChainOrigNamesMap(chainOrigNames);
        }
        // initialising the visited ArrayList for keeping track of symmetry redundancy
        initialiseVisited();
        // the isCrystallographic() condition covers 3 cases:
        // a) entries with expMethod X-RAY/other diffraction and defined crystalCell (most usual case)
        // b) entries with expMethod null but defined crystalCell (e.g. PDB file with CRYST1 record but no expMethod annotation)
        // c) entries with expMethod not X-RAY (e.g. NMR) and defined crystalCell (NMR entries do have a dummy CRYST1 record "1 1 1 90 90 90 P1")
        // d) isCrystallographic will be false if the structure is crystallographic but the space group was not recognized
        calcInterfacesCrystal(set, cutoff);
        return set;
    }
}
public class SourceLineAnnotation { /** * Factory method for creating a source line annotation describing the * source line number for the instruction being visited by given visitor . * @ param classContext * the ClassContext * @ param visitor * a BetterVisitor which is visiting the method * @ param pc * the bytecode offset of the instruction in the method * @ return the SourceLineAnnotation , or null if we do not have line number * information for the instruction */ public static SourceLineAnnotation fromVisitedInstruction ( ClassContext classContext , PreorderVisitor visitor , int pc ) { } }
return fromVisitedInstructionRange ( classContext , visitor , pc , pc ) ;
public class Polarizability { /** * calculates the mean molecular polarizability as described in paper of Kang and Jhorn . * @ param atomContainer AtomContainer * @ return polarizabilitiy */ public double calculateKJMeanMolecularPolarizability ( IAtomContainer atomContainer ) { } }
double polarizabilitiy = 0 ; IAtomContainer acH = atomContainer . getBuilder ( ) . newInstance ( IAtomContainer . class , atomContainer ) ; addExplicitHydrogens ( acH ) ; for ( int i = 0 ; i < acH . getAtomCount ( ) ; i ++ ) { polarizabilitiy += getKJPolarizabilityFactor ( acH , acH . getAtom ( i ) ) ; } return polarizabilitiy ;
public class Javalin { /** * Adds a PATCH request handler for the specified path to the instance . * @ see < a href = " https : / / javalin . io / documentation # handlers " > Handlers in docs < / a > */ public Javalin patch ( @ NotNull String path , @ NotNull Handler handler ) { } }
return addHandler ( HandlerType . PATCH , path , handler ) ;
public class RowExtractors { /** * Obtain an extractor of a tuple containing each of the values from the supplied extractors . * @ param extractors the extractors ; may not be null or empty * @ return the tuple extractor */ public static ExtractFromRow extractorWith ( final Collection < ExtractFromRow > extractors ) { } }
final int len = extractors . size ( ) ; assert len > 0 ; // There are a few cases where specific row extractor implementations would be better . . . if ( len == 1 ) { return extractors . iterator ( ) . next ( ) ; } if ( len == 2 ) { Iterator < ExtractFromRow > iter = extractors . iterator ( ) ; ExtractFromRow first = iter . next ( ) ; ExtractFromRow second = iter . next ( ) ; return extractorWith ( first , second ) ; } if ( len == 3 ) { Iterator < ExtractFromRow > iter = extractors . iterator ( ) ; ExtractFromRow first = iter . next ( ) ; ExtractFromRow second = iter . next ( ) ; ExtractFromRow third = iter . next ( ) ; return extractorWith ( first , second , third ) ; } if ( len == 4 ) { Iterator < ExtractFromRow > iter = extractors . iterator ( ) ; ExtractFromRow first = iter . next ( ) ; ExtractFromRow second = iter . next ( ) ; ExtractFromRow third = iter . next ( ) ; ExtractFromRow fourth = iter . next ( ) ; return extractorWith ( first , second , third , fourth ) ; } // Okay , there are at least 4 extractors , so we need to return a general - case row extractor . . . Collection < TypeFactory < ? > > types = new ArrayList < TypeFactory < ? > > ( ) ; final ExtractFromRow [ ] extracts = new ExtractFromRow [ len ] ; int i = 0 ; for ( ExtractFromRow extractor : extractors ) { extracts [ i ++ ] = extractor ; types . add ( extractor . getType ( ) ) ; } final TypeFactory < ? > type = Tuples . typeFactory ( types ) ; return new ExtractFromRow ( ) { @ Override public TypeFactory < ? > getType ( ) { return type ; } @ Override public Object getValueInRow ( RowAccessor row ) { Object [ ] values = new Object [ len ] ; for ( int i = 0 ; i != len ; ++ i ) { values [ i ] = extracts [ i ] . getValueInRow ( row ) ; } return Tuples . tuple ( values ) ; } } ;
public class ChangesListener { /** * Push changes to coordinator to apply . */ protected void pushChangesToCoordinator ( ChangesItem changesItem ) throws SecurityException , RPCException { } }
if ( ! changesItem . isEmpty ( ) ) { rpcService . executeCommandOnCoordinator ( applyPersistedChangesTask , true , changesItem ) ; }
public class BeanDefinitionDtoConverterServiceImpl { /** * Convert from a DTO to an internal Spring bean definition . * @ param beanDefinitionDto The DTO object . * @ return Returns a Spring bean definition . */ public BeanDefinition toInternal ( BeanDefinitionInfo beanDefinitionInfo ) { } }
if ( beanDefinitionInfo instanceof GenericBeanDefinitionInfo ) { GenericBeanDefinitionInfo genericInfo = ( GenericBeanDefinitionInfo ) beanDefinitionInfo ; GenericBeanDefinition def = new GenericBeanDefinition ( ) ; def . setBeanClassName ( genericInfo . getClassName ( ) ) ; if ( genericInfo . getPropertyValues ( ) != null ) { MutablePropertyValues propertyValues = new MutablePropertyValues ( ) ; for ( Entry < String , BeanMetadataElementInfo > entry : genericInfo . getPropertyValues ( ) . entrySet ( ) ) { BeanMetadataElementInfo info = entry . getValue ( ) ; propertyValues . add ( entry . getKey ( ) , toInternal ( info ) ) ; } def . setPropertyValues ( propertyValues ) ; } return def ; } else if ( beanDefinitionInfo instanceof ObjectBeanDefinitionInfo ) { ObjectBeanDefinitionInfo objectInfo = ( ObjectBeanDefinitionInfo ) beanDefinitionInfo ; return createBeanDefinitionByIntrospection ( objectInfo . getObject ( ) ) ; } else { throw new IllegalArgumentException ( "Conversion to internal of " + beanDefinitionInfo . getClass ( ) . getName ( ) + " not implemented" ) ; }
public class LeaderAppender {
    /**
     * Triggers a heartbeat to a majority of the cluster.
     * For followers to which no AppendRequest is currently being sent, a new empty AppendRequest will be
     * created and sent. For followers to which an AppendRequest is already being sent, the appendEntries()
     * call will piggyback on the *next* AppendRequest. Thus, multiple calls to this method will only ever
     * result in a single AppendRequest to each follower at any given time, and the returned future will be
     * shared by all concurrent calls.
     *
     * @return A completable future to be completed the next time a heartbeat is received by a majority of the cluster.
     */
    public CompletableFuture<Long> appendEntries() {
        // If there are no other active members in the cluster, simply complete the append operation.
        if (context.getClusterState().getRemoteMemberStates().isEmpty())
            return CompletableFuture.completedFuture(null);
        // If no heartbeat future already exists, that indicates there's no heartbeat currently under way.
        // Create a new heartbeat future and commit to all members in the cluster.
        if (heartbeatFuture == null) {
            CompletableFuture<Long> newHeartbeatFuture = new CompletableFuture<>();
            heartbeatFuture = newHeartbeatFuture;
            heartbeatTime = System.currentTimeMillis();
            for (MemberState member : context.getClusterState().getRemoteMemberStates()) {
                appendEntries(member);
            }
            return newHeartbeatFuture;
        }
        // If a heartbeat future already exists, that indicates there is a heartbeat currently underway.
        // We don't want to allow callers to be completed by a heartbeat that may already almost be done.
        // So, we create the next heartbeat future if necessary and return that. Once the current heartbeat
        // completes the next future will be used to do another heartbeat. This ensures that only one
        // heartbeat can be outstanding at any given point in time.
        else if (nextHeartbeatFuture == null) {
            nextHeartbeatFuture = new CompletableFuture<>();
            return nextHeartbeatFuture;
        } else {
            return nextHeartbeatFuture;
        }
    }
}
public class QuantilesHelper {
    /**
     * Converts, in place, an array of weights into totals of the weights
     * preceding each item (an exclusive prefix sum).
     *
     * @param array the array of weights, overwritten with preceding totals
     * @return the total weight of the original array
     */
    public static long convertToPrecedingCummulative(final long[] array) {
        long total = 0;
        for (int idx = 0; idx < array.length; idx++) {
            final long weight = array[idx];
            // Each slot receives the sum of all weights before it.
            array[idx] = total;
            total += weight;
        }
        return total;
    }
}
public class LocaleUtils {

    /**
     * Resolves the best matching locale from the given preferred locales, supported
     * locales and the default locale. Use ordered collections for deterministic results.
     *
     * <p>Resolution order:
     * <ol>
     * <li>If no supported or preferred locales are given, return the default locale.</li>
     * <li>For each preferred locale, in order:
     *   <ol>
     *   <li>a perfect match (or a match with the default locale) is returned immediately;</li>
     *   <li>otherwise a supported locale matching language+country without a variant wins;</li>
     *   <li>with strict variant matching off, any language+country match may win;</li>
     *   <li>otherwise a language-only match (preferring country-less candidates) wins.</li>
     *   </ol></li>
     * <li>If country matching is on, fall back to the first supported locale whose
     *     country matched any preferred locale.</li>
     * <li>Otherwise return the default locale.</li>
     * </ol>
     *
     * @param preferredLocales      preferred locales; iterator order expresses preference
     * @param supportedLocales      available locales; collection order expresses preference
     * @param defaultLocale         fallback when nothing matches (may be null)
     * @param countryMatching       true to allow country-only fallback matches
     * @param strictVariantMatching true if a resolved locale must not carry a variant
     *                              the preferred locale does not have
     * @return the best match, or {@code defaultLocale} if nothing matches
     */
    public static Locale resolveLocale(Iterator<Locale> preferredLocales, Collection<Locale> supportedLocales,
            Locale defaultLocale, boolean countryMatching, boolean strictVariantMatching) {
        // No point if there are no options
        if (supportedLocales == null || supportedLocales.isEmpty()) {
            return defaultLocale;
        } else if (preferredLocales == null || !preferredLocales.hasNext()) {
            return defaultLocale;
        }
        /* Use this as last fallback before using default locale */
        final List<Locale> countryMatchingLocales = new ArrayList<Locale>();
        do {
            final Locale preferredLocale = preferredLocales.next();
            if (supportedLocales.contains(preferredLocale)
                    || (defaultLocale != null && defaultLocale.equals(preferredLocale))) {
                /* If we have a perfect match, just return it */
                return preferredLocale;
            }
            final String preferredLocaleLanguage = preferredLocale.getLanguage();
            final String preferredLocaleCountry = preferredLocale.getCountry();
            /* Skip locales that have no language */
            if (empty(preferredLocaleLanguage)) {
                continue;
            }
            // Best candidates found so far for THIS preferred locale only.
            Locale lastLanguageMatch = null, lastLanguageAndCountryMatch = null;
            boolean languageMatching = false;
            for (Locale supportedLocale : supportedLocales) {
                if (supportedLocale != null) {
                    if (equal(preferredLocaleLanguage, supportedLocale.getLanguage())) {
                        languageMatching = true;
                        if (equal(supportedLocale.getCountry(), preferredLocaleCountry)) {
                            /*
                             * the country of the language matching locale
                             * matches the preferred locale country
                             */
                            if (lastLanguageAndCountryMatch == null
                                    && (!strictVariantMatching || empty(supportedLocale.getVariant()))) {
                                /*
                                 * Use the first language and country matching
                                 * locale if strict variant matching is turned
                                 * off or it has no variant
                                 */
                                lastLanguageAndCountryMatch = supportedLocale;
                            } else if (lastLanguageAndCountryMatch != null
                                    && empty(supportedLocale.getVariant())) {
                                /*
                                 * Only use language and country matching locale
                                 * if it has no variant
                                 */
                                lastLanguageAndCountryMatch = supportedLocale;
                            }
                        } else if (lastLanguageMatch == null || empty(supportedLocale.getCountry())) {
                            /* Use a language only matching locale */
                            lastLanguageMatch = supportedLocale;
                        }
                    } else if (countryMatching && !languageMatching
                            && equal(preferredLocaleCountry, supportedLocale.getCountry())) {
                        /*
                         * We only care about country matches if no language
                         * matches can be found, since a language match is much
                         * more worth. This is a optimization concerning memory
                         * consumption. Preserve country match if no perfect
                         * match can be found
                         */
                        countryMatchingLocales.add(supportedLocale);
                    }
                }
            }
            /* Return language match if we have one */
            if (lastLanguageAndCountryMatch != null || lastLanguageMatch != null) {
                // Language+country beats language-only when both were found.
                return lastLanguageAndCountryMatch != null ? lastLanguageAndCountryMatch : lastLanguageMatch;
            }
        } while (preferredLocales.hasNext());
        /* This is our best bet, default locale may be even worse */
        if (countryMatching && !countryMatchingLocales.isEmpty()) {
            return countryMatchingLocales.get(0);
        }
        /* Could not find any better match, so just use the default */
        return defaultLocale;
    }
}
public class AmazonRoute53Client { /** * Updates the resource record sets in a specified hosted zone that were created based on the settings in a * specified traffic policy version . * When you update a traffic policy instance , Amazon Route 53 continues to respond to DNS queries for the root * resource record set name ( such as example . com ) while it replaces one group of resource record sets with another . * Route 53 performs the following operations : * < ol > * < li > * Route 53 creates a new group of resource record sets based on the specified traffic policy . This is true * regardless of how significant the differences are between the existing resource record sets and the new resource * record sets . * < / li > * < li > * When all of the new resource record sets have been created , Route 53 starts to respond to DNS queries for the * root resource record set name ( such as example . com ) by using the new resource record sets . * < / li > * < li > * Route 53 deletes the old group of resource record sets that are associated with the root resource record set * name . * < / li > * < / ol > * @ param updateTrafficPolicyInstanceRequest * A complex type that contains information about the resource record sets that you want to update based on a * specified traffic policy instance . * @ return Result of the UpdateTrafficPolicyInstance operation returned by the service . * @ throws InvalidInputException * The input is not valid . * @ throws NoSuchTrafficPolicyException * No traffic policy exists with the specified ID . * @ throws NoSuchTrafficPolicyInstanceException * No traffic policy instance exists with the specified ID . * @ throws PriorRequestNotCompleteException * If Amazon Route 53 can ' t process a request before the next request arrives , it will reject subsequent * requests for the same hosted zone and return an < code > HTTP 400 error < / code > ( < code > Bad request < / code > ) . 
* If Route 53 returns this error repeatedly for the same request , we recommend that you wait , in intervals * of increasing duration , before you try the request again . * @ throws ConflictingTypesException * You tried to update a traffic policy instance by using a traffic policy version that has a different DNS * type than the current type for the instance . You specified the type in the JSON document in the * < code > CreateTrafficPolicy < / code > or < code > CreateTrafficPolicyVersion < / code > request . * @ sample AmazonRoute53 . UpdateTrafficPolicyInstance * @ see < a href = " http : / / docs . aws . amazon . com / goto / WebAPI / route53-2013-04-01 / UpdateTrafficPolicyInstance " * target = " _ top " > AWS API Documentation < / a > */ @ Override public UpdateTrafficPolicyInstanceResult updateTrafficPolicyInstance ( UpdateTrafficPolicyInstanceRequest request ) { } }
request = beforeClientExecution ( request ) ; return executeUpdateTrafficPolicyInstance ( request ) ;
public class ConfigRepository { /** * Create a new commit reflecting the provided properties , removing any property not mentioned here * @ param name * @ param email * @ param data * @ param message */ public void setAll ( final String name , final String email , final Map < String , Map < String , ConfigPropertyValue > > data , final String message ) { } }
set ( name , email , data , ConfigChangeMode . WIPE_ALL , message ) ;
public class MarkdownParser { /** * Verify that JSON entities contain the required fields and that entity indices are correct . */ private void validateEntities ( JsonNode data ) throws InvalidInputException { } }
for ( JsonNode node : data . findParents ( INDEX_START ) ) { for ( String key : new String [ ] { INDEX_START , INDEX_END , ID , TYPE } ) { if ( node . path ( key ) . isMissingNode ( ) ) { throw new InvalidInputException ( "Required field \"" + key + "\" missing from the entity payload" ) ; } } int startIndex = node . get ( INDEX_START ) . intValue ( ) ; int endIndex = node . get ( INDEX_END ) . intValue ( ) ; if ( endIndex <= startIndex ) { throw new InvalidInputException ( String . format ( "Invalid entity payload: %s (start index: %s, end index: %s)" , node . get ( ID ) . textValue ( ) , startIndex , endIndex ) ) ; } }
public class Bip44WalletUtils { /** * Generates a BIP - 44 compatible Ethereum wallet on top of BIP - 39 generated seed . * @ param password Will be used for both wallet encryption and passphrase for BIP - 39 seed * @ param destinationDirectory The directory containing the wallet * @ return A BIP - 39 compatible Ethereum wallet * @ throws CipherException if the underlying cipher is not available * @ throws IOException if the destination cannot be written to */ public static Bip39Wallet generateBip44Wallet ( String password , File destinationDirectory ) throws CipherException , IOException { } }
return generateBip44Wallet ( password , destinationDirectory , false ) ;
public class Prefs { /** * Removed all the stored keys and values . * @ return the { @ link Editor } for chaining . The changes have already been committed / applied * through the execution of this method . * @ see android . content . SharedPreferences . Editor # clear ( ) */ public static Editor clear ( ) { } }
final Editor editor = getPreferences ( ) . edit ( ) . clear ( ) ; editor . apply ( ) ; return editor ;
public class ReviewsImpl { /** * Publish video review to make it available for review . * @ param teamName Your team name . * @ param reviewId Id of the review . * @ throws IllegalArgumentException thrown if parameters fail the validation * @ throws APIErrorException thrown if the request is rejected by server * @ throws RuntimeException all other wrapped checked exceptions if the request fails to be sent */ public void publishVideoReview ( String teamName , String reviewId ) { } }
publishVideoReviewWithServiceResponseAsync ( teamName , reviewId ) . toBlocking ( ) . single ( ) . body ( ) ;
public class VarNamePattern { /** * Returns true if this pattern is valid . */ public boolean isValid ( ) { } }
// Must be non - empty . . . boolean valid = varNamePath_ != null ; if ( valid ) { // Containing a sequence of identifiers . . . int i ; for ( i = 0 ; i < varNamePath_ . length && DefUtils . isIdentifier ( varNamePath_ [ i ] ) ; i ++ ) ; // Optionally terminated by a single wildcard . int membersLeft = varNamePath_ . length - i ; valid = membersLeft == 0 || ( membersLeft == 1 && ( varNamePath_ [ i ] . equals ( ALL_DESCENDANTS ) || varNamePath_ [ i ] . equals ( ALL_CHILDREN ) ) ) ; } return valid ;
public class P1_QueryOp {

    /**
     * Queries a page of records.
     *
     * @param clazz         entity class to query
     * @param selectOnlyKey whether to select only the primary key; when true, query
     *                      interceptors are skipped and RelatedColumn post-processing is not applied
     * @param withCount     whether to compute the total count, using SQL_CALC_FOUND_ROWS
     *                      together with a follow-up {@code select FOUND_ROWS()}
     * @param offset        zero-based offset; ignored when null (when non-null, limit must be present)
     * @param limit         page size; ignored when null
     * @param postSql       the where/group/order tail of the SQL statement
     * @param args          SQL arguments
     * @return the page of results (total is -1 when {@code withCount} is false)
     */
    @SuppressWarnings({"unchecked", "rawtypes"})
    private <T> PageData<T> _getPage(Class<T> clazz, boolean selectOnlyKey, boolean withCount,
            Integer offset, Integer limit, String postSql, Object... args) {
        // Assemble SELECT + soft-delete-aware WHERE tail + LIMIT clause.
        StringBuilder sql = new StringBuilder();
        sql.append(SQLUtils.getSelectSQL(clazz, selectOnlyKey, withCount));
        sql.append(SQLUtils.autoSetSoftDeleted(postSql, clazz));
        sql.append(SQLUtils.genLimitSQL(offset, limit));
        log(sql);
        // Do not use Arrays.asList directly: it does not support clear().
        List<Object> argsList = new ArrayList<Object>();
        if (args != null) {
            argsList.addAll(Arrays.asList(args));
        }
        if (!selectOnlyKey) {
            // Interceptors only run for full-row queries.
            doInterceptBeforeQuery(clazz, sql, argsList);
        }
        long start = System.currentTimeMillis();
        List<T> list;
        if (argsList.isEmpty()) {
            // namedParameterJdbcTemplate is used because of in(?) parameter expansion.
            list = namedParameterJdbcTemplate.query(sql.toString(),
                    new AnnotationSupportRowMapper(clazz, selectOnlyKey));
        } else {
            // namedParameterJdbcTemplate is used because of in(?) parameter expansion.
            list = namedParameterJdbcTemplate.query(NamedParameterUtils.trans(sql.toString(), argsList),
                    NamedParameterUtils.transParam(argsList),
                    new AnnotationSupportRowMapper(clazz, selectOnlyKey));
        }
        int total = -1; // -1 means the total was not queried and is unknown
        if (withCount) {
            // Note: the total must be queried immediately after the list query.
            total = jdbcTemplate.queryForObject("select FOUND_ROWS()", Integer.class);
        }
        if (!selectOnlyKey) {
            postHandleRelatedColumn(list);
        }
        long cost = System.currentTimeMillis() - start;
        logSlow(cost, sql.toString(), argsList);
        if (!selectOnlyKey) {
            doInteceptAfterQueryList(clazz, list, total, sql, argsList);
        }
        PageData<T> pageData = new PageData<T>();
        pageData.setData(list);
        pageData.setTotal(total);
        if (limit != null) {
            pageData.setPageSize(limit);
        }
        return pageData;
    }
}
public class CommerceShipmentPersistenceImpl { /** * Returns a range of all the commerce shipments . * Useful when paginating results . Returns a maximum of < code > end - start < / code > instances . < code > start < / code > and < code > end < / code > are not primary keys , they are indexes in the result set . Thus , < code > 0 < / code > refers to the first result in the set . Setting both < code > start < / code > and < code > end < / code > to { @ link QueryUtil # ALL _ POS } will return the full result set . If < code > orderByComparator < / code > is specified , then the query will include the given ORDER BY logic . If < code > orderByComparator < / code > is absent and pagination is required ( < code > start < / code > and < code > end < / code > are not { @ link QueryUtil # ALL _ POS } ) , then the query will include the default ORDER BY logic from { @ link CommerceShipmentModelImpl } . If both < code > orderByComparator < / code > and pagination are absent , for performance reasons , the query will not have an ORDER BY clause and the returned result set will be sorted on by the primary key in an ascending order . * @ param start the lower bound of the range of commerce shipments * @ param end the upper bound of the range of commerce shipments ( not inclusive ) * @ return the range of commerce shipments */ @ Override public List < CommerceShipment > findAll ( int start , int end ) { } }
return findAll ( start , end , null ) ;
public class DropwizardMetricRegistry { /** * Creates a named { @ link Timer } that wraps a Dropwizard Metrics { @ link * com . codahale . metrics . Timer } . * @ param name * @ return a { @ link Timer } that wraps a Dropwizard Metrics { @ link com . codahale . metrics . Timer } */ @ Override public Timer timer ( String name ) { } }
final com . codahale . metrics . Timer timer = registry . timer ( name ) ; return new Timer ( ) { @ Override public Timer . Context time ( ) { final com . codahale . metrics . Timer . Context timerContext = timer . time ( ) ; return new Context ( ) { @ Override public void close ( ) { timerContext . close ( ) ; } } ; } @ Override public void update ( long duration , TimeUnit unit ) { timer . update ( duration , unit ) ; } } ;
public class AIStreamIterator { /** * / * ( non - Javadoc ) * @ see java . util . Iterator # next ( ) */ public Object next ( ) { } }
if ( TraceComponent . isAnyTracingEnabled ( ) && tc . isEntryEnabled ( ) ) SibTr . entry ( tc , "next" ) ; Long ticks ; RemoteMessageRequest remoteMessageRequest ; if ( msgIterator . hasNext ( ) ) { ticks = ( Long ) msgIterator . next ( ) ; remoteMessageRequest = new RemoteMessageRequest ( ticks . longValue ( ) , aiStream ) ; } else { remoteMessageRequest = null ; } if ( TraceComponent . isAnyTracingEnabled ( ) && tc . isEntryEnabled ( ) ) SibTr . exit ( tc , "next" , remoteMessageRequest ) ; return remoteMessageRequest ;
public class DeepEquals {

    /**
     * Compares two objects with a 'deep' comparison, traversing the object graph and
     * performing either a field-by-field comparison (when no custom equals() is present)
     * or calling the custom equals() where one exists. This allows graphs loaded at
     * different times (with different object identities) to be reliably compared, and
     * handles cycles (e.g. A-&gt;B-&gt;C-&gt;A) via a visited set.
     *
     * @param a       object one to compare
     * @param b       object two to compare
     * @param options map of compare options. With no option, a custom equals() is used
     *                when present. If IGNORE_CUSTOM_EQUALS is present it is expected to
     *                be a Set of classes whose custom equals() must NOT be used; an
     *                empty set disables all custom equals() calls.
     * @return true if a is equivalent to b (all field values of both subgraphs equal,
     *         either at the field level or via encountered overridden equals() methods)
     */
    public static boolean deepEquals(Object a, Object b, Map options) {
        // Iterative DFS over pairs; visited prevents cycles from looping forever.
        Set<DualKey> visited = new HashSet<>();
        Deque<DualKey> stack = new LinkedList<>();
        // NOTE(review): declared as Set<String> but later probed with a Class object;
        // presumably the runtime set actually holds Class instances — verify with callers.
        Set<String> ignoreCustomEquals = (Set<String>) options.get(IGNORE_CUSTOM_EQUALS);
        stack.addFirst(new DualKey(a, b));
        while (!stack.isEmpty()) {
            DualKey dualKey = stack.removeFirst();
            visited.add(dualKey);
            if (dualKey._key1 == dualKey._key2) {
                // Same instance is always equal to itself.
                continue;
            }
            if (dualKey._key1 == null || dualKey._key2 == null) {
                // If either one is null, not equal (both can't be null, due to above comparison).
                return false;
            }
            // Floating-point values are compared with an epsilon tolerance.
            if (dualKey._key1 instanceof Double
                    && compareFloatingPointNumbers(dualKey._key1, dualKey._key2, doubleEplison)) {
                continue;
            }
            if (dualKey._key1 instanceof Float
                    && compareFloatingPointNumbers(dualKey._key1, dualKey._key2, floatEplison)) {
                continue;
            }
            Class key1Class = dualKey._key1.getClass();
            // Simple leaf types compare directly via equals().
            if (key1Class.isPrimitive() || prims.contains(key1Class) || dualKey._key1 instanceof String
                    || dualKey._key1 instanceof Date || dualKey._key1 instanceof Class) {
                if (!dualKey._key1.equals(dualKey._key2)) {
                    return false;
                }
                continue; // Nothing further to push on the stack
            }
            // Container-kind checks: both sides must belong to the same container category.
            if (dualKey._key1 instanceof Collection) {
                // If Collections, they both must be Collection
                if (!(dualKey._key2 instanceof Collection)) {
                    return false;
                }
            } else if (dualKey._key2 instanceof Collection) {
                // They both must be Collection
                return false;
            }
            if (dualKey._key1 instanceof SortedSet) {
                if (!(dualKey._key2 instanceof SortedSet)) {
                    return false;
                }
            } else if (dualKey._key2 instanceof SortedSet) {
                return false;
            }
            if (dualKey._key1 instanceof SortedMap) {
                if (!(dualKey._key2 instanceof SortedMap)) {
                    return false;
                }
            } else if (dualKey._key2 instanceof SortedMap) {
                return false;
            }
            if (dualKey._key1 instanceof Map) {
                if (!(dualKey._key2 instanceof Map)) {
                    return false;
                }
            } else if (dualKey._key2 instanceof Map) {
                return false;
            }
            if (!isContainerType(dualKey._key1) && !isContainerType(dualKey._key2)
                    && !key1Class.equals(dualKey._key2.getClass())) {
                // Must be same class
                return false;
            }
            // Handle all [] types. In order to be equal, the arrays must be the same
            // length, be of the same type, be in the same order, and all elements within
            // the array must be deeply equivalent.
            if (key1Class.isArray()) {
                if (!compareArrays(dualKey._key1, dualKey._key2, stack, visited)) {
                    return false;
                }
                continue;
            }
            // Special handle SortedSets because they are fast to compare because their
            // elements must be in the same order to be equivalent Sets.
            if (dualKey._key1 instanceof SortedSet) {
                if (!compareOrderedCollection((Collection) dualKey._key1, (Collection) dualKey._key2,
                        stack, visited)) {
                    return false;
                }
                continue;
            }
            // Handled unordered Sets. This is a slightly more expensive comparison because order cannot
            // be assumed, a temporary Map must be created, however the comparison still runs in O(N) time.
            if (dualKey._key1 instanceof Set) {
                if (!compareUnorderedCollection((Collection) dualKey._key1, (Collection) dualKey._key2,
                        stack, visited)) {
                    return false;
                }
                continue;
            }
            // Check any Collection that is not a Set. In these cases, element order
            // matters, therefore this comparison is faster than using unordered comparison.
            if (dualKey._key1 instanceof Collection) {
                if (!compareOrderedCollection((Collection) dualKey._key1, (Collection) dualKey._key2,
                        stack, visited)) {
                    return false;
                }
                continue;
            }
            // Compare two SortedMaps. This takes advantage of the fact that these
            // Maps can be compared in O(N) time due to their ordering.
            if (dualKey._key1 instanceof SortedMap) {
                if (!compareSortedMap((SortedMap) dualKey._key1, (SortedMap) dualKey._key2,
                        stack, visited)) {
                    return false;
                }
                continue;
            }
            // Compare two Unordered Maps. This is a slightly more expensive comparison because
            // order cannot be assumed, therefore a temporary Map must be created, however the
            // comparison still runs in O(N) time.
            if (dualKey._key1 instanceof Map) {
                if (!compareUnorderedMap((Map) dualKey._key1, (Map) dualKey._key2, stack, visited)) {
                    return false;
                }
                continue;
            }
            // If there is a custom equals... AND
            // the caller has not specified any classes to skip... OR
            // the caller has specified come classes to ignore and this one is not in the list... THEN
            // compare using the custom equals.
            if (hasCustomEquals(key1Class)) {
                if (ignoreCustomEquals == null
                        || (ignoreCustomEquals.size() > 0 && !ignoreCustomEquals.contains(key1Class))) {
                    if (!dualKey._key1.equals(dualKey._key2)) {
                        return false;
                    }
                    continue;
                }
            }
            // Fall back to a field-by-field comparison: push each field pair for traversal.
            Collection<Field> fields = ReflectionUtils.getDeepDeclaredFields(key1Class);
            for (Field field : fields) {
                try {
                    DualKey dk = new DualKey(field.get(dualKey._key1), field.get(dualKey._key2));
                    if (!visited.contains(dk)) {
                        stack.addFirst(dk);
                    }
                } catch (Exception ignored) {
                    // Inaccessible field: skipped, matching the original best-effort behavior.
                }
            }
        }
        return true;
    }
}
public class FindBugsWorker {

    /**
     * Runs FindBugs/SpotBugs on the given collection of resources from the same project.
     * (Note: this is currently not thread-safe.)
     *
     * @param resources files or directories which should be on the project classpath.
     *                  All resources must belong to the same project, and no element may
     *                  contain another; if the list contains the project itself, it must
     *                  be the only element.
     * @throws CoreException if marker clearing or analysis setup fails
     */
    public void work(List<WorkItem> resources) throws CoreException {
        if (resources == null || resources.isEmpty()) {
            if (DEBUG) {
                FindbugsPlugin.getDefault().logInfo("No resources to analyse for project " + project);
            }
            return;
        }
        if (DEBUG) {
            System.out.println(resources);
        }
        // st tracks timing checkpoints across the whole analysis run.
        st = new StopTimer();
        st.newPoint("initPlugins");
        // make sure it's initialized
        FindbugsPlugin.applyCustomDetectors(false);
        st.newPoint("clearMarkers");
        // clear markers
        clearMarkers(resources);
        st.newPoint("configureOutputFiles");
        final Project findBugsProject = new Project();
        findBugsProject.setProjectName(javaProject.getElementName());
        final Reporter bugReporter = new Reporter(javaProject, findBugsProject, monitor);
        if (FindBugsConsole.getConsole() != null) {
            bugReporter.setReportingStream(FindBugsConsole.getConsole().newOutputStream());
        }
        bugReporter.setPriorityThreshold(userPrefs.getUserDetectorThreshold());
        FindBugs.setHome(FindbugsPlugin.getFindBugsEnginePluginLocation());
        Map<IPath, IPath> outLocations = createOutputLocations();
        // collect all related class/jar/war etc files for analysis
        collectClassFiles(resources, outLocations, findBugsProject);
        // attach source directories (can be used by some detectors, see
        // SwitchFallthrough)
        configureSourceDirectories(findBugsProject, outLocations);
        if (findBugsProject.getFileCount() == 0) {
            // Nothing was collected for analysis; bail out early.
            if (DEBUG) {
                FindbugsPlugin.getDefault().logInfo("No resources to analyse for project " + project);
            }
            return;
        }
        st.newPoint("createAuxClasspath");
        String[] classPathEntries = createAuxClasspath();
        // add to findbugs classpath
        for (String entry : classPathEntries) {
            findBugsProject.addAuxClasspathEntry(entry);
        }
        st.newPoint("configureProps");
        IPreferenceStore store = FindbugsPlugin.getPluginPreferences(project);
        boolean cacheClassData = store.getBoolean(FindBugsConstants.KEY_CACHE_CLASS_DATA);
        final FindBugs2 findBugs = new FindBugs2Eclipse(project, cacheClassData, bugReporter);
        findBugs.setNoClassOk(true);
        findBugs.setProject(findBugsProject);
        findBugs.setBugReporter(bugReporter);
        findBugs.setProgressCallback(bugReporter);
        findBugs.setDetectorFactoryCollection(DetectorFactoryCollection.instance());
        // configure detectors.
        // Filter file paths are stored project-relative; the engine needs absolute paths.
        userPrefs.setIncludeFilterFiles(relativeToAbsolute(userPrefs.getIncludeFilterFiles()));
        userPrefs.setExcludeFilterFiles(relativeToAbsolute(userPrefs.getExcludeFilterFiles()));
        userPrefs.setExcludeBugsFiles(relativeToAbsolute(userPrefs.getExcludeBugsFiles()));
        findBugs.setUserPreferences(userPrefs);
        // configure extended preferences
        findBugs.setAnalysisFeatureSettings(userPrefs.getAnalysisFeatureSettings());
        findBugs.setMergeSimilarWarnings(false);
        if (cacheClassData) {
            FindBugs2Eclipse.checkClassPathChanges(findBugs.getProject().getAuxClasspathEntryList(), project);
        }
        st.newPoint("runFindBugs");
        if (DEBUG) {
            FindbugsPlugin.log("Running SpotBugs");
        }
        runFindBugs(findBugs);
        if (DEBUG) {
            FindbugsPlugin.log("Done running SpotBugs");
        }
        // Merge new results into existing results
        // if the argument is project, then it's not incremental
        boolean incremental = !(resources.get(0) instanceof IProject);
        updateBugCollection(findBugsProject, bugReporter, incremental);
        st.newPoint("done");
        st = null;
        monitor.done();
    }
}
public class cmppolicy { /** * Use this API to fetch all the cmppolicy resources that are configured on netscaler . */ public static cmppolicy [ ] get ( nitro_service service ) throws Exception { } }
cmppolicy obj = new cmppolicy ( ) ; cmppolicy [ ] response = ( cmppolicy [ ] ) obj . get_resources ( service ) ; return response ;
public class CanalServerWithEmbedded { /** * 取消订阅 */ @ Override public void unsubscribe ( ClientIdentity clientIdentity ) throws CanalServerException { } }
CanalInstance canalInstance = canalInstances . get ( clientIdentity . getDestination ( ) ) ; canalInstance . getMetaManager ( ) . unsubscribe ( clientIdentity ) ; // 执行一下meta订阅 logger . info ( "unsubscribe successfully, {}" , clientIdentity ) ;
public class AbstractProxySessionManager { /** * For testing */ public final long getSessionAcquireCount ( RaftGroupId groupId , long sessionId ) { } }
SessionState session = sessions . get ( groupId ) ; return session != null && session . id == sessionId ? session . acquireCount . get ( ) : 0 ;
public class SwipeActionAdapter { /** * SwipeActionTouchListener . ActionCallbacks callback * We just link it through to our own interface * @ param listView The originating { @ link ListView } . * @ param position The positions to perform the action on , sorted in descending order * for convenience . * @ param direction The type of swipe that triggered the action . */ @ Override public void onAction ( ListView listView , int [ ] position , SwipeDirection [ ] direction ) { } }
if ( mSwipeActionListener != null ) mSwipeActionListener . onSwipe ( position , direction ) ;
public class StylesContainer { /** * Write the various styles in the automatic styles . * @ param util an XML util * @ param appendable the destination * @ throws IOException if the styles can ' t be written */ public void writeContentAutomaticStyles ( final XMLUtil util , final Appendable appendable ) throws IOException { } }
final Iterable < ObjectStyle > styles = this . objectStylesContainer . getValues ( Dest . CONTENT_AUTOMATIC_STYLES ) ; for ( final ObjectStyle style : styles ) assert style . isHidden ( ) : style . toString ( ) ; this . write ( styles , util , appendable ) ;
public class BaseMatchMethodPermutationBuilder { /** * Returns the statement arguments for the match method matcher statement . */ protected static Object [ ] getMatcherStatementArgs ( int matchedCount ) { } }
TypeName matcher = ParameterizedTypeName . get ( ClassName . get ( Matcher . class ) , TypeName . OBJECT ) ; TypeName listOfMatchers = ParameterizedTypeName . get ( ClassName . get ( List . class ) , matcher ) ; TypeName lists = TypeName . get ( Lists . class ) ; TypeName argumentMatchers = TypeName . get ( ArgumentMatchers . class ) ; return Stream . concat ( ImmutableList . of ( listOfMatchers , lists ) . stream ( ) , IntStream . range ( 0 , matchedCount ) . mapToObj ( i -> argumentMatchers ) ) . toArray ( s -> new TypeName [ s ] ) ;
public class RaftNodeImpl {

    /**
     * Takes a snapshot if {@code commitIndex} advanced equal to or more than
     * {@link RaftAlgorithmConfig#getCommitIndexAdvanceCountToSnapshot()}.
     * A snapshot is not created if there's an ongoing membership change or the
     * Raft group is being destroyed.
     */
    private void takeSnapshotIfCommitIndexAdvanced() {
        long commitIndex = state.commitIndex();
        // Not enough new committed entries since the last snapshot: nothing to do.
        if ((commitIndex - state.log().snapshotIndex()) < commitIndexAdvanceCountToSnapshot) {
            return;
        }
        if (isTerminatedOrSteppedDown()) {
            // If the status is UPDATING_MEMBER_LIST or TERMINATING, it means the status is normally ACTIVE
            // and there is an appended but not committed RaftGroupCmd.
            // If the status is TERMINATED or STEPPED_DOWN, then there will not be any new appends.
            return;
        }
        RaftLog log = state.log();
        // The integration may report failure by returning a Throwable instead of a snapshot.
        Object snapshot = raftIntegration.takeSnapshot(commitIndex);
        if (snapshot instanceof Throwable) {
            Throwable t = (Throwable) snapshot;
            logger.severe("Could not take snapshot at commit index: " + commitIndex, t);
            return;
        }
        int snapshotTerm = log.getLogEntry(commitIndex).term();
        RaftGroupMembers members = state.committedGroupMembers();
        SnapshotEntry snapshotEntry =
                new SnapshotEntry(snapshotTerm, commitIndex, snapshot, members.index(), members.members());
        long minMatchIndex = 0L;
        LeaderState leaderState = state.leaderState();
        if (leaderState != null) {
            long[] indices = leaderState.matchIndices();
            // Last slot is reserved for leader index,
            // and always zero. That's why we are skipping it.
            Arrays.sort(indices, 0, indices.length - 1);
            // Smallest follower match index: entries below it may still be needed.
            minMatchIndex = indices[0];
        }
        // Keep up to maxNumberOfLogsToKeepAfterSnapshot entries behind the commit index,
        // but never truncate below the slowest follower's match index.
        long truncateLogsUpToIndex = max(commitIndex - maxNumberOfLogsToKeepAfterSnapshot, minMatchIndex);
        int truncated = log.setSnapshot(snapshotEntry, truncateLogsUpToIndex);
        if (logger.isFineEnabled()) {
            logger.fine(snapshotEntry + " is taken, " + truncated + " entries are truncated.");
        }
    }
}
public class Statistics {
    /**
     * Get the estimated saving of this code in bytes when RAIDing is done.
     *
     * @param conf configuration used to reach the distributed filesystem
     * @return the saving in bytes, or -1 if the estimate cannot be computed
     */
    public long getDoneSaving(Configuration conf) {
        try {
            DFSClient dfs = ((DistributedFileSystem) FileSystem.get(conf)).getClient();
            Counters raidedCounters = stateToSourceCounters.get(RaidState.RAIDED);
            Counters shouldRaidCounters = stateToSourceCounters.get(RaidState.NOT_RAIDED_BUT_SHOULD);
            // Bytes occupied once RAIDing is done: source data plus parity data.
            long physical = estimatedDoneSourceSize + estimatedDoneParitySize;
            // Logical bytes of everything that is (or should be) RAIDed.
            long logical = raidedCounters.getNumLogical() + shouldRaidCounters.getNumLogical();
            // Without RAID these logical bytes would be stored at default replication.
            return logical * dfs.getDefaultReplication() - physical;
        } catch (Exception e) {
            // Best-effort estimate: -1 signals "unknown" (e.g. filesystem unreachable).
            return - 1;
        }
    }
}
public class CopyCallable {
    /**
     * Performs the copy of an Amazon S3 object from source bucket to
     * destination bucket as multiple copy part requests. The information about
     * the part to be copied is specified in the request as a byte range
     * (first-last).
     *
     * @throws Exception any Exception that occurs while carrying out the request
     */
    private void copyInParts() throws Exception {
        multipartUploadId = initiateMultipartUpload(copyObjectRequest);
        long optimalPartSize = getOptimalPartSize(metadata.getContentLength());
        try {
            CopyPartRequestFactory requestFactory = new CopyPartRequestFactory(
                    copyObjectRequest, multipartUploadId, optimalPartSize, metadata.getContentLength());
            copyPartsInParallel(requestFactory);
        } catch (Exception e) {
            // Order matters: notify listeners of the failure first, then abort the
            // multipart upload so S3 does not keep orphaned (billable) parts,
            // and finally rethrow with the original cause preserved.
            publishProgress(listenerChain, ProgressEventType.TRANSFER_FAILED_EVENT);
            abortMultipartCopy();
            throw new RuntimeException("Unable to perform multipart copy", e);
        }
    }
}
public class AsteriskQueueImpl { /** * Returns a member by its location . * @ param location ot the member * @ return the member by its location . */ AsteriskQueueMemberImpl getMember ( String location ) { } }
synchronized ( members ) { if ( members . containsKey ( location ) ) { return members . get ( location ) ; } } return null ;
public class FormatUtils {
    /**
     * Renders a byte array as a hex string where each byte is formatted as
     * {@code %02x}, preceded by {@code prefix} and joined with {@code separator}.
     *
     * @param bytes     the byte array to be transformed
     * @param prefix    text placed before every byte
     * @param separator text placed between consecutive bytes
     * @return the string representation of the byte array
     */
    public static String byteArrayToHexString(byte[] bytes, String prefix, String separator) {
        StringBuilder result = new StringBuilder();
        // The delimiter is empty before the first element, so no separator is
        // emitted before the first byte and none trails the last one.
        String delimiter = "";
        for (byte b : bytes) {
            result.append(delimiter).append(prefix).append(String.format("%02x", b));
            delimiter = separator;
        }
        return result.toString();
    }
}
public class PropertyController {
    /**
     * Deletes the value of a property on an entity.
     *
     * @param entityType       type of the entity to edit
     * @param id               ID of the entity to edit
     * @param propertyTypeName fully qualified name of the property to delete
     * @return acknowledgement of the deletion
     */
    @RequestMapping(value = "{entityType}/{id}/{propertyTypeName}/edit", method = RequestMethod.DELETE)
    @ResponseStatus(HttpStatus.ACCEPTED)
    public Ack deleteProperty(@PathVariable ProjectEntityType entityType, @PathVariable ID id, @PathVariable String propertyTypeName) {
        // Resolve the target entity first, then delegate deletion to the service layer.
        return propertyService.deleteProperty(getEntity(entityType, id), propertyTypeName);
    }
}
public class CreateLoadBalancer {
    /**
     * Creates an Application Load Balancer.
     * <p>
     * To create listeners for your load balancer, use CreateListener. You can add security
     * groups, subnets and tags at creation time, or later via SetSecurityGroups, SetSubnets
     * and AddTags. Use DescribeLoadBalancers to list balancers and DeleteLoadBalancer when
     * finished. You can create up to 20 load balancers per region per account (limit increase
     * can be requested). See the Application Load Balancers Guide:
     * http://docs.aws.amazon.com/elasticloadbalancing/latest/application/application-load-balancers.html
     *
     * @param endpoint               Optional - endpoint to send the request to. Default: "https://elasticloadbalancing.amazonaws.com"
     * @param identity               ID of the secret access key associated with your Amazon AWS or IAM account
     * @param credential             secret access key associated with your Amazon AWS or IAM account
     * @param proxyHost              Optional - proxy server used to connect to the Amazon API; empty means no proxy
     * @param proxyPort              Optional - proxy server port; must be set together with proxyHost
     * @param proxyUsername          Optional - proxy server user name
     * @param proxyPassword          Optional - proxy server password for proxyUsername
     * @param headers                Optional - CRLF-separated "name:value" request headers (RFC 2616)
     * @param queryParams            Optional - "&"-separated, NOT URL-encoded "name=value" query parameters
     * @param version                Optional - API version. Default: "2015-12-01"
     * @param delimiter              Optional - delimiter used for the multi-valued string inputs
     * @param loadBalancerName       name of the load balancer; unique per account, max 32 alphanumeric/hyphen
     *                               characters, must not begin or end with a hyphen
     * @param subnetIdsString        one or more subnet IDs to attach; one subnet per Availability Zone,
     *                               at least two Availability Zones required
     * @param scheme                 Optional - "internet-facing" (public IPs, default) or "internal" (private IPs)
     * @param securityGroupIdsString Optional - security group IDs to assign to the load balancer
     * @param keyTagsString          Optional - delimiter-separated tag keys (length 1..128)
     * @param valueTagsString        Optional - delimiter-separated tag values (length 0..256)
     * @return a map containing the outcome of the action (or the failure message and exception),
     *         the returnCode of the operation and the ID of the request
     */
    @Action(name = "Create Load Balancer",
            outputs = {
                    @Output(OutputNames.RETURN_CODE),
                    @Output(OutputNames.RETURN_RESULT),
                    @Output(OutputNames.EXCEPTION)},
            responses = {
                    @Response(text = SUCCESS, field = OutputNames.RETURN_CODE, value = SUCCESS_RETURN_CODE,
                            matchType = MatchType.COMPARE_EQUAL, responseType = ResponseType.RESOLVED),
                    @Response(text = FAILURE, field = OutputNames.RETURN_CODE, value = FAILURE_RETURN_CODE,
                            matchType = MatchType.COMPARE_EQUAL, responseType = ResponseType.ERROR, isOnFail = true)})
    public Map<String, String> execute(@Param(value = ENDPOINT) String endpoint,
                                       @Param(value = IDENTITY, required = true) String identity,
                                       @Param(value = CREDENTIAL, required = true, encrypted = true) String credential,
                                       @Param(value = PROXY_HOST) String proxyHost,
                                       @Param(value = PROXY_PORT) String proxyPort,
                                       @Param(value = PROXY_USERNAME) String proxyUsername,
                                       @Param(value = PROXY_PASSWORD, encrypted = true) String proxyPassword,
                                       @Param(value = HEADERS) String headers,
                                       @Param(value = QUERY_PARAMS) String queryParams,
                                       @Param(value = VERSION) String version,
                                       @Param(value = DELIMITER) String delimiter,
                                       @Param(value = LOAD_BALANCER_NAME, required = true) String loadBalancerName,
                                       @Param(value = SUBNET_IDS_STRING, required = true) String subnetIdsString,
                                       @Param(value = SCHEME) String scheme,
                                       @Param(value = SECURITY_GROUP_IDS_STRING) String securityGroupIdsString,
                                       @Param(value = KEY_TAGS_STRING) String keyTagsString,
                                       @Param(value = VALUE_TAGS_STRING) String valueTagsString) {
        try {
            // Fall back to the default API version when none is supplied.
            version = getDefaultStringInput(version, LOAD_BALANCER_DEFAULT_API_VERSION);
            // Assemble the common request wrapper (endpoint, credentials, proxy, action).
            final CommonInputs commonInputs = new CommonInputs.Builder()
                    .withEndpoint(endpoint, LOAD_BALANCING_API, EMPTY)
                    .withIdentity(identity)
                    .withCredential(credential)
                    .withProxyHost(proxyHost)
                    .withProxyPort(proxyPort)
                    .withProxyUsername(proxyUsername)
                    .withProxyPassword(proxyPassword)
                    .withHeaders(headers)
                    .withQueryParams(queryParams)
                    .withVersion(version)
                    .withDelimiter(delimiter)
                    .withAction(CREATE_LOAD_BALANCER)
                    .withApiService(LOAD_BALANCING_API)
                    .withRequestUri(EMPTY)
                    .withRequestPayload(EMPTY)
                    .withHttpClientMethod(HTTP_CLIENT_METHOD_GET)
                    .build();
            // Action-specific input groups: tags, security groups, balancer and network settings.
            final CustomInputs customInputs = new CustomInputs.Builder()
                    .withKeyTagsString(keyTagsString)
                    .withValueTagsString(valueTagsString)
                    .build();
            final IamInputs iamInputs = new IamInputs.Builder()
                    .withSecurityGroupIdsString(securityGroupIdsString)
                    .build();
            final LoadBalancerInputs loadBalancerInputs = new LoadBalancerInputs.Builder()
                    .withLoadBalancerName(loadBalancerName)
                    .withScheme(scheme)
                    .build();
            final NetworkInputs networkInputs = new NetworkInputs.Builder()
                    .withSubnetIdsString(subnetIdsString)
                    .build();
            return new QueryApiExecutor().execute(commonInputs, customInputs, iamInputs, loadBalancerInputs, networkInputs);
        } catch (Exception exception) {
            // All failures are reported through the standard result map, never thrown.
            return ExceptionProcessor.getExceptionResult(exception);
        }
    }
}
public class StyleUtilities { /** * Converts a style to its string representation to be written to file . * @ param style the style to convert . * @ return the style string . * @ throws Exception */ public static String styleToString ( Style style ) throws Exception { } }
StyledLayerDescriptor sld = sf . createStyledLayerDescriptor ( ) ; UserLayer layer = sf . createUserLayer ( ) ; layer . setLayerFeatureConstraints ( new FeatureTypeConstraint [ ] { null } ) ; sld . addStyledLayer ( layer ) ; layer . addUserStyle ( style ) ; SLDTransformer aTransformer = new SLDTransformer ( ) ; aTransformer . setIndentation ( 4 ) ; String xml = aTransformer . transform ( sld ) ; return xml ;
public class SipStack { /** * FOR INTERNAL USE ONLY . Not to be used by a test program . */ public void processTimeout ( TimeoutEvent arg0 ) { } }
synchronized ( listeners ) { Iterator < SipListener > iter = listeners . iterator ( ) ; while ( iter . hasNext ( ) == true ) { SipListener listener = ( SipListener ) iter . next ( ) ; listener . processTimeout ( arg0 ) ; } }
public class TSSyntax {
    /**
     * Create a TS*Syntax class around the provided ClassTemplate.
     * That class will perform the heavy lifting of rendering TS-specific strings into the template.
     *
     * @param template the ClassTemplate
     * @return a TS*Syntax class (see {@link TSSyntax} class-level docs for more info)
     * @throws RuntimeException if the template spec kind is not recognized
     */
    private TSTypeSyntax createTypeSyntax(ClassTemplateSpec template) {
        // Dispatch on the concrete spec type; each branch wraps the spec in the
        // matching TS syntax renderer. Branch order assumed insignificant because
        // the spec classes look disjoint -- TODO confirm against the hierarchy.
        if (template instanceof RecordTemplateSpec) {
            return new TSRecordSyntax((RecordTemplateSpec) template);
        } else if (template instanceof TyperefTemplateSpec) {
            // Typeref and fixed specs are built through factory methods, not constructors.
            return TSTyperefSyntaxCreate((TyperefTemplateSpec) template);
        } else if (template instanceof FixedTemplateSpec) {
            return TSFixedSyntaxCreate((FixedTemplateSpec) template);
        } else if (template instanceof EnumTemplateSpec) {
            return new TSEnumSyntax((EnumTemplateSpec) template);
        } else if (template instanceof PrimitiveTemplateSpec) {
            return new TSPrimitiveTypeSyntax((PrimitiveTemplateSpec) template);
        } else if (template instanceof MapTemplateSpec) {
            return new TSMapSyntax((MapTemplateSpec) template);
        } else if (template instanceof ArrayTemplateSpec) {
            return new TSArraySyntax((ArrayTemplateSpec) template);
        } else if (template instanceof UnionTemplateSpec) {
            return new TSUnionSyntax((UnionTemplateSpec) template);
        } else {
            throw new RuntimeException("Unrecognized template spec: " + template + " with schema " + template.getSchema());
        }
    }
}
public class Inject {
    /**
     * Returns an injector implementation which uses the given dependency
     * object's type to infer which setter/field to inject.
     *
     * @param target the target object for injection
     */
    public static IVarargDependencyInjector bean(final Object target) {
        return new AbstractVarargDependencyInjector() {
            public void with(Object... dependencies) {
                TypeBasedInjector injector = new TypeBasedInjector();
                // Two-pass design: validate every dependency up front so a
                // failure leaves the target completely untouched ...
                for (Object dependency : dependencies) {
                    injector.validateInjectionOf(target, dependency);
                }
                // ... and only then perform the actual injections.
                for (Object dependency : dependencies) {
                    injector.inject(target, dependency);
                }
            }
        };
    }
}
public class AptControlImplementation {
    /**
     * Enforces the VersionRequired annotation for control extensions.
     * Emits a compile error when the implemented control interface does not
     * satisfy the version this implementation declares support for.
     */
    private void enforceVersionSupported() {
        if (_versionSupported != null) {
            int majorSupported = _versionSupported.major();
            int minorSupported = _versionSupported.minor();
            if (majorSupported < 0) // no real version support requirement
                return;
            AptControlInterface ci = getControlInterface();
            if (ci == null)
                return;
            // -1 means "no version declared on the interface".
            int majorPresent = -1;
            int minorPresent = -1;
            Version ciVersion = ci.getVersion();
            if (ciVersion != null) {
                majorPresent = ciVersion.major();
                minorPresent = ciVersion.minor();
                // NOTE(review): satisfied when the supported version is >= the
                // present one; a negative minorSupported acts as a wildcard.
                // Confirm intended direction of the comparison against the spec.
                if (majorSupported >= majorPresent && (minorSupported < 0 || minorSupported >= minorPresent)) {
                    // Version requirement is satisfied
                    return;
                }
            }
            // Version requirement failed
            _ap.printError(_implDecl, "versionsupported.failed", _implDecl.getSimpleName(), majorSupported, minorSupported, majorPresent, minorPresent);
        }
    }
}
public class CoverSheetBase { /** * Override load list to clear display if no patient in context . */ @ Override protected void loadData ( ) { } }
if ( patient == null ) { asyncAbort ( ) ; reset ( ) ; status ( "No patient selected." ) ; } else { super . loadData ( ) ; } detailView . setValue ( null ) ;
public class RingBufferDiagnostics { /** * Returns the count of all requests in the ringbuffer */ @ InterfaceAudience . Public @ InterfaceStability . Experimental public int totalCount ( ) { } }
int total = countNonService ; for ( Map . Entry < ServiceType , Integer > entry : counts . entrySet ( ) ) { total += entry . getValue ( ) ; } return total ;
public class FunctionUtils {
    /**
     * Search for functions in string and replace with respective function result.
     *
     * @param str     string to parse
     * @param context test context supplying the function registry
     * @return parsed string result
     */
    public static String replaceFunctionsInString(String str, TestContext context) {
        // Delegates to the three-argument overload with the flag set to false
        // (presumably "enable quoting" -- TODO confirm the parameter name).
        return replaceFunctionsInString(str, context, false);
    }
}
public class WebhookUtils {
    /**
     * Utility method to process the webhook notification from {@link HttpServlet#doPost}.
     * The {@link HttpServletRequest#getInputStream()} is closed in a finally block inside this
     * method. If it is not detected to be a webhook notification, an
     * {@link HttpServletResponse#SC_BAD_REQUEST} error will be displayed. If the notification
     * channel is found in the given notification channel data store, it will call
     * {@link UnparsedNotificationCallback#onNotification} for the registered notification
     * callback method.
     *
     * @param req              an {@link HttpServletRequest} object that contains the client request
     * @param resp             an {@link HttpServletResponse} object for the servlet response
     * @param channelDataStore notification channel data store
     * @throws IOException      if an input or output error is detected when handling the request
     * @throws ServletException if the request for the POST could not be handled
     */
    public static void processWebhookNotification(HttpServletRequest req, HttpServletResponse resp, DataStore<StoredChannel> channelDataStore) throws ServletException, IOException {
        // Webhook notifications are always POSTs; anything else is a programming error.
        Preconditions.checkArgument("POST".equals(req.getMethod()));
        InputStream contentStream = req.getInputStream();
        try {
            // log headers
            if (LOGGER.isLoggable(Level.CONFIG)) {
                StringBuilder builder = new StringBuilder();
                Enumeration<?> e = req.getHeaderNames();
                if (e != null) {
                    while (e.hasMoreElements()) {
                        Object nameObj = e.nextElement();
                        if (nameObj instanceof String) {
                            String name = (String) nameObj;
                            Enumeration<?> ev = req.getHeaders(name);
                            if (ev != null) {
                                while (ev.hasMoreElements()) {
                                    builder.append(name).append(": ").append(ev.nextElement()).append(StringUtils.LINE_SEPARATOR);
                                }
                            }
                        }
                    }
                }
                LOGGER.config(builder.toString());
                // Wrap the stream so the body is logged as it is consumed; the
                // finally block below closes this wrapper (and the underlying stream).
                contentStream = new LoggingInputStream(contentStream, LOGGER, Level.CONFIG, 0x4000);
                // TODO(yanivi): allow to override logging content limit
            }
            // parse the relevant headers, and create a notification
            Long messageNumber;
            try {
                messageNumber = Long.valueOf(req.getHeader(WebhookHeaders.MESSAGE_NUMBER));
            } catch (NumberFormatException e) {
                // Missing or malformed header: treated as absent and rejected below.
                messageNumber = null;
            }
            String resourceState = req.getHeader(WebhookHeaders.RESOURCE_STATE);
            String resourceId = req.getHeader(WebhookHeaders.RESOURCE_ID);
            String resourceUri = req.getHeader(WebhookHeaders.RESOURCE_URI);
            String channelId = req.getHeader(WebhookHeaders.CHANNEL_ID);
            String channelExpiration = req.getHeader(WebhookHeaders.CHANNEL_EXPIRATION);
            String channelToken = req.getHeader(WebhookHeaders.CHANNEL_TOKEN);
            String changed = req.getHeader(WebhookHeaders.CHANGED);
            if (messageNumber == null || resourceState == null || resourceId == null || resourceUri == null || channelId == null) {
                resp.sendError(HttpServletResponse.SC_BAD_REQUEST, "Notification did not contain all required information.");
                return;
            }
            UnparsedNotification notification = new UnparsedNotification(messageNumber, resourceState, resourceId, resourceUri, channelId)
                    .setChannelExpiration(channelExpiration)
                    .setChannelToken(channelToken)
                    .setChanged(changed)
                    .setContentType(req.getContentType())
                    .setContentStream(contentStream);
            // check if we know about the channel, hand over the notification to the notification callback
            StoredChannel storedChannel = channelDataStore.get(notification.getChannelId());
            if (storedChannel != null) {
                storedChannel.getNotificationCallback().onNotification(storedChannel, notification);
            }
        } finally {
            contentStream.close();
        }
    }
}
public class AfplibPackageImpl {
    /**
     * <!-- begin-user-doc -->
     * Returns the TBMDIRCTION enum, lazily resolved from the registered AFP lib package.
     * <!-- end-user-doc -->
     * @generated
     */
    public EEnum getTBMDIRCTION() {
        if (tbmdirctionEEnum == null) {
            // EMF-generated lazy lookup: classifier index 74 is fixed by the
            // code generator; do not edit this by hand.
            tbmdirctionEEnum = (EEnum) EPackage.Registry.INSTANCE.getEPackage(AfplibPackage.eNS_URI).getEClassifiers().get(74);
        }
        return tbmdirctionEEnum;
    }
}
public class JacksonJsonDataFormat { /** * Identifies the canonical type of an object heuristically . * @ return the canonical type identifier of the object ' s class * according to Jackson ' s type format ( see { @ link TypeFactory # constructFromCanonical ( String ) } ) */ public String getCanonicalTypeName ( Object object ) { } }
ensureNotNull ( "object" , object ) ; for ( TypeDetector typeDetector : typeDetectors ) { if ( typeDetector . canHandle ( object ) ) { return typeDetector . detectType ( object ) ; } } throw LOG . unableToDetectCanonicalType ( object ) ;
public class JFeatureSpec {
    /**
     * Java wrapper for {@link FeatureSpec#extractWithSettings(String, FeatureBuilder, ClassTag)}
     * producing sparse labeled points.
     *
     * @param settings the subset settings to extract with
     * @return a record extractor emitting {@code SparseLabeledPoint}s
     */
    public JRecordExtractor<T, SparseLabeledPoint> extractWithSubsetSettingsSparseLabeledPoint(String settings) {
        // Bridge to the Scala implementation via JavaOps, wrapping the result
        // in the Java-friendly extractor type.
        return new JRecordExtractor<>(JavaOps.extractWithSubsetSettingsSparseLabeledPoint(self, settings));
    }
}
public class FailoverWatcher { /** * Deal with create node event , just call the leader election . * @ param path which znode is created */ protected void processNodeCreated ( String path ) { } }
if ( path . equals ( masterZnode ) ) { LOG . info ( masterZnode + " created and try to become active master" ) ; handleMasterNodeChange ( ) ; }
public class AddCrmBasedUserList {
    /**
     * Runs the example: creates a CRM-based user list, then uploads hashed
     * members (by email and by address) to it.
     *
     * @param adWordsServices the services factory
     * @param session         the session
     * @throws ApiException                 if the API request failed with one or more service errors
     * @throws RemoteException              if the API request failed due to other errors
     * @throws UnsupportedEncodingException if encoding the hashed email failed
     */
    public static void runExample(AdWordsServicesInterface adWordsServices, AdWordsSession session) throws RemoteException, UnsupportedEncodingException {
        // Get the UserListService.
        AdwordsUserListServiceInterface userListService = adWordsServices.get(session, AdwordsUserListServiceInterface.class);
        // Create a user list.
        CrmBasedUserList userList = new CrmBasedUserList();
        userList.setName("Customer relationship management list #" + System.currentTimeMillis());
        userList.setDescription("A list of customers that originated from email addresses");
        // CRM-based user lists can use a membershipLifeSpan of 10000 to indicate unlimited; otherwise
        // normal values apply.
        userList.setMembershipLifeSpan(30L);
        userList.setUploadKeyType(CustomerMatchUploadKeyType.CONTACT_INFO);
        // Create operation.
        UserListOperation operation = new UserListOperation();
        operation.setOperand(userList);
        operation.setOperator(Operator.ADD);
        // Add user list.
        UserListReturnValue result = userListService.mutate(new UserListOperation[] { operation });
        // Display user list.
        UserList userListAdded = result.getValue(0);
        System.out.printf("User list with name '%s' and ID %d was added.%n", userListAdded.getName(), userListAdded.getId());
        // Get user list ID.
        Long userListId = userListAdded.getId();
        // Create operation to add members to the user list based on email addresses.
        MutateMembersOperation mutateMembersOperation = new MutateMembersOperation();
        MutateMembersOperand operand = new MutateMembersOperand();
        operand.setUserListId(userListId);
        // Hash normalized email addresses based on SHA-256 hashing algorithm.
        List<Member> members = new ArrayList<>(EMAILS.size());
        for (String email : EMAILS) {
            String normalizedEmail = toNormalizedString(email);
            Member member = new Member();
            member.setHashedEmail(toSHA256String(normalizedEmail));
            members.add(member);
        }
        // Example member identified by postal address instead of email.
        String firstName = "John";
        String lastName = "Doe";
        String countryCode = "US";
        String zipCode = "10011";
        AddressInfo addressInfo = new AddressInfo();
        // First and last name must be normalized and hashed.
        addressInfo.setHashedFirstName(toSHA256String(toNormalizedString(firstName)));
        addressInfo.setHashedLastName(toSHA256String(toNormalizedString(lastName)));
        // Country code and zip code are sent in plaintext.
        addressInfo.setCountryCode(countryCode);
        addressInfo.setZipCode(zipCode);
        Member memberByAddress = new Member();
        memberByAddress.setAddressInfo(addressInfo);
        members.add(memberByAddress);
        operand.setMembersList(members.toArray(new Member[members.size()]));
        mutateMembersOperation.setOperand(operand);
        mutateMembersOperation.setOperator(Operator.ADD);
        // Add members to the user list based on email addresses.
        MutateMembersReturnValue mutateMembersResult = userListService.mutateMembers(new MutateMembersOperation[] { mutateMembersOperation });
        // Display results.
        // Reminder: it may take several hours for the list to be populated with members.
        for (UserList userListResult : mutateMembersResult.getUserLists()) {
            System.out.printf("%d email addresses were uploaded to user list with name '%s' and ID %d " + "and are scheduled for review.%n", EMAILS.size(), userListResult.getName(), userListResult.getId());
        }
    }
}
public class AbstractLoginManager {
    /**
     * Main login routine.
     *
     * @param aRequestScope    Request scope
     * @param aUnifiedResponse Response
     * @return {@link EContinue#BREAK} to indicate that no user is logged in and
     *         therefore the login screen should be shown,
     *         {@link EContinue#CONTINUE} if a user is correctly logged in.
     */
    @Nonnull
    public final EContinue checkUserAndShowLogin(@Nonnull final IRequestWebScopeWithoutResponse aRequestScope, @Nonnull final UnifiedResponse aUnifiedResponse) {
        final LoggedInUserManager aLoggedInUserManager = LoggedInUserManager.getInstance();
        String sSessionUserID = aLoggedInUserManager.getCurrentUserID();
        boolean bLoggedInInThisRequest = false;
        if (sSessionUserID == null) {
            // No user currently logged in -> start login
            boolean bLoginError = false;
            ICredentialValidationResult aLoginResult = ELoginResult.SUCCESS;
            // Is the special login-check action present?
            if (isLoginInProgress(aRequestScope)) {
                // Login screen was already shown
                // -> Check request parameters
                final String sLoginName = getLoginName(aRequestScope);
                final String sPassword = getPassword(aRequestScope);
                // Resolve user - may be null
                final IUser aUser = getUserOfLoginName(sLoginName);
                // Try main login
                aLoginResult = aLoggedInUserManager.loginUser(aUser, sPassword, m_aRequiredRoleIDs);
                if (aLoginResult.isSuccess()) {
                    // Credentials are valid - implies that the user was resolved
                    // correctly
                    sSessionUserID = aUser.getID();
                    bLoggedInInThisRequest = true;
                } else {
                    // Credentials are invalid
                    if (GlobalDebug.isDebugMode())
                        LOGGER.warn("Login of '" + sLoginName + "' failed because " + aLoginResult);
                    // Anyway show the error message only if at least some credential
                    // values are passed
                    bLoginError = StringHelper.hasText(sLoginName) || StringHelper.hasText(sPassword);
                }
            }
            if (sSessionUserID == null) {
                // Show login screen as no user is in the session
                final IHTMLProvider aLoginScreenProvider = createLoginScreen(bLoginError, aLoginResult);
                PhotonHTMLHelper.createHTMLResponse(aRequestScope, aUnifiedResponse, aLoginScreenProvider);
            }
        }
        // Update details
        final LoginInfo aLoginInfo = aLoggedInUserManager.getLoginInfo(sSessionUserID);
        if (aLoginInfo != null) {
            // Update last login info
            aLoginInfo.setLastAccessDTNow();
            // Set custom attributes
            modifyLoginInfo(aLoginInfo, aRequestScope, bLoggedInInThisRequest);
        } else {
            // Internal inconsistency
            if (sSessionUserID != null)
                LOGGER.error("Failed to resolve LoginInfo of user ID '" + sSessionUserID + "'");
        }
        if (bLoggedInInThisRequest) {
            // Avoid double submit by simply redirecting to the desired destination
            // URL without the login parameters
            aUnifiedResponse.setRedirect(aRequestScope.getURL());
            return EContinue.BREAK;
        }
        // Continue only, if a valid user ID is present
        return EContinue.valueOf(sSessionUserID != null);
    }
}
public class CancelSchemaExtensionRequestMarshaller {
    /**
     * Marshall the given parameter object.
     *
     * @param cancelSchemaExtensionRequest the request to marshall; must not be null
     * @param protocolMarshaller           the protocol marshaller receiving the field bindings
     * @throws SdkClientException if the request is null or if marshalling fails
     */
    public void marshall(CancelSchemaExtensionRequest cancelSchemaExtensionRequest, ProtocolMarshaller protocolMarshaller) {
        if (cancelSchemaExtensionRequest == null) {
            throw new SdkClientException("Invalid argument passed to marshall(...)");
        }
        try {
            protocolMarshaller.marshall(cancelSchemaExtensionRequest.getDirectoryId(), DIRECTORYID_BINDING);
            protocolMarshaller.marshall(cancelSchemaExtensionRequest.getSchemaExtensionId(), SCHEMAEXTENSIONID_BINDING);
        } catch (Exception e) {
            // Wrap any failure in the SDK's client exception, preserving the cause.
            throw new SdkClientException("Unable to marshall request to JSON: " + e.getMessage(), e);
        }
    }
}
public class RedBlackTreeLong {
    /**
     * Returns the node containing the key just before the given key, or
     * {@code null} when no such node exists.
     */
    public Node<T> getPrevious(long value) {
        // The smallest key in the tree has no predecessor.
        if (value == first.value)
            return null;
        if (value == root.value) {
            // Predecessor of the root is the right-most node of its left subtree.
            if (root.left == null)
                return null;
            Node<T> n = root.left;
            while (n.right != null)
                n = n.right;
            return n;
        }
        if (value < root.value) {
            if (root.left == null)
                return null;
            // Delegates to an overloaded helper defined elsewhere in this class.
            return getPrevious(root.left, value);
        }
        if (root.right == null)
            return null;
        // NOTE(review): the right-branch overload also receives the root,
        // presumably as the best predecessor candidate so far -- confirm
        // against the helper's definition.
        return getPrevious(root, root.right, value);
    }
}
public class PolyBind {
    /**
     * Sets up a "choice" for the injector to resolve at injection time.
     *
     * @param binder       the binder for the injector that is being configured
     * @param property     the property that will be checked to determine the implementation choice
     * @param interfaceKey the interface that will be injected using this choice
     * @param defaultKey   the default instance to be injected if the property doesn't match a choice; can be null
     * @param <T>          interface type
     * @return a ScopedBindingBuilder so that scopes can be added to the binding, if required
     */
    public static <T> ScopedBindingBuilder createChoice(Binder binder, String property, Key<T> interfaceKey, Key<? extends T> defaultKey) {
        // Delegates to the full variant with no default property value.
        return createChoiceWithDefault(binder, property, interfaceKey, defaultKey, null);
    }
}
public class DataJoinReducerBase { /** * This is the function that re - groups values for a key into sub - groups based * on a secondary key ( input tag ) . * @ param arg1 * @ return */ private SortedMap < Object , ResetableIterator > regroup ( Object key , Iterator arg1 , Reporter reporter ) throws IOException { } }
this . numOfValues = 0 ; SortedMap < Object , ResetableIterator > retv = new TreeMap < Object , ResetableIterator > ( ) ; TaggedMapOutput aRecord = null ; while ( arg1 . hasNext ( ) ) { this . numOfValues += 1 ; if ( this . numOfValues % 100 == 0 ) { reporter . setStatus ( "key: " + key . toString ( ) + " numOfValues: " + this . numOfValues ) ; } if ( this . numOfValues > this . maxNumOfValuesPerGroup ) { continue ; } aRecord = ( ( TaggedMapOutput ) arg1 . next ( ) ) . clone ( job ) ; Text tag = aRecord . getTag ( ) ; ResetableIterator data = retv . get ( tag ) ; if ( data == null ) { data = createResetableIterator ( ) ; retv . put ( tag , data ) ; } data . add ( aRecord ) ; } if ( this . numOfValues > this . largestNumOfValues ) { this . largestNumOfValues = numOfValues ; LOG . info ( "key: " + key . toString ( ) + " this.largestNumOfValues: " + this . largestNumOfValues ) ; } return retv ;
public class Logger { /** * Issue a log message with parameters and a throwable with a level of ERROR . * @ param message the message * @ param params the message parameters * @ param t the throwable * @ deprecated To log a message with parameters , using { @ link # errorv ( Throwable , String , Object . . . ) } is recommended . */ @ Deprecated public void error ( Object message , Object [ ] params , Throwable t ) { } }
doLog ( Level . ERROR , FQCN , message , params , t ) ;
public class Mapper { /** * Create a new Map by using the array objects as keys , and the mapping result as values . Discard keys which return * null values from the mapper . * @ param mapper a Mapper to map the values * @ param a array of items * @ return a new Map with values mapped */ public static Map makeMap ( Mapper mapper , Object [ ] a ) { } }
return makeMap ( mapper , java . util . Arrays . asList ( a ) , false ) ;
public class PlotCanvas { /** * Adds a label to this canvas . */ public void label ( String text , Font font , double ... coord ) { } }
Label label = new Label ( text , coord ) ; label . setFont ( font ) ; add ( label ) ;
public class AmazonKinesisVideoArchivedMediaClient { /** * Gets media for a list of fragments ( specified by fragment number ) from the archived data in an Amazon Kinesis * video stream . * < note > * You must first call the < code > GetDataEndpoint < / code > API to get an endpoint . Then send the * < code > GetMediaForFragmentList < / code > requests to this endpoint using the < a * href = " https : / / docs . aws . amazon . com / cli / latest / reference / " > - - endpoint - url parameter < / a > . * < / note > * The following limits apply when using the < code > GetMediaForFragmentList < / code > API : * < ul > * < li > * A client can call < code > GetMediaForFragmentList < / code > up to five times per second per stream . * < / li > * < li > * Kinesis Video Streams sends media data at a rate of up to 25 megabytes per second ( or 200 megabits per second ) * during a < code > GetMediaForFragmentList < / code > session . * < / li > * < / ul > * @ param getMediaForFragmentListRequest * @ return Result of the GetMediaForFragmentList operation returned by the service . * @ throws ResourceNotFoundException * < code > GetMedia < / code > throws this error when Kinesis Video Streams can ' t find the stream that you * specified . < / p > * < code > GetHLSStreamingSessionURL < / code > throws this error if a session with a < code > PlaybackMode < / code > of * < code > ON _ DEMAND < / code > is requested for a stream that has no fragments within the requested time range , * or if a session with a < code > PlaybackMode < / code > of < code > LIVE < / code > is requested for a stream that has * no fragments within the last 30 seconds . * @ throws InvalidArgumentException * A specified parameter exceeds its restrictions , is not supported , or can ' t be used . * @ throws ClientLimitExceededException * Kinesis Video Streams has throttled the request because you have exceeded the limit of allowed client * calls . Try making the call later . 
* @ throws NotAuthorizedException * Status Code : 403 , The caller is not authorized to perform an operation on the given stream , or the token * has expired . * @ sample AmazonKinesisVideoArchivedMedia . GetMediaForFragmentList * @ see < a * href = " http : / / docs . aws . amazon . com / goto / WebAPI / kinesis - video - archived - media - 2017-09-30 / GetMediaForFragmentList " * target = " _ top " > AWS API Documentation < / a > */ @ Override public GetMediaForFragmentListResult getMediaForFragmentList ( GetMediaForFragmentListRequest request ) { } }
request = beforeClientExecution ( request ) ; return executeGetMediaForFragmentList ( request ) ;
public class FilePanelService { /** * Checks for matching exclude patterns . */ private static boolean exclude ( String host , java . nio . file . Path path ) { } }
List < PathMatcher > exclusions = getExcludes ( host ) ; if ( exclusions == null && host != null ) { exclusions = getExcludes ( null ) ; // check global config } if ( exclusions != null ) { for ( PathMatcher matcher : exclusions ) { if ( matcher . matches ( path ) ) return true ; } } return false ;
public class MessageBundleControl { /** * ( non - Javadoc ) * @ see java . util . ResourceBundle . Control # getTimeToLive ( java . lang . String , * java . util . Locale ) */ @ Override public long getTimeToLive ( String baseName , Locale locale ) { } }
if ( baseName == null || locale == null ) { throw new NullPointerException ( ) ; } return TTL_DONT_CACHE ;
/**
 * Handles the special HTML command for the password-change screen.
 * Gives the screen a chance to change screens for special HTML commands; it can change:
 * 1. The information display line (shown on the next screen, e.g. "submit was successful").
 * 2. The error display line (if there was an error).
 *
 * @param screenParent the parent screen
 * @return this, null (display the main menu), or a new screen to display
 */
public ScreenModel doServletCommand(ScreenModel screenParent)
{
    // Let the superclass process the params from the previous screen first.
    ScreenModel screen = super.doServletCommand(screenParent);
    if (MenuConstants.SUBMIT.equalsIgnoreCase(this.getProperty(DBParams.COMMAND)))
    {
        if (this.getTask().getStatusText(DBConstants.WARNING_MESSAGE) == null)
        {   // No warning text = the password change succeeded; free this screen
            // and return null so the main menu displays next.
            this.free();
            return null;
        }
        else
        {   // The change failed: blank out all three password fields so the user
            // must re-enter them (don't echo passwords back to the browser).
            this.getScreenRecord().getField(UserScreenRecord.CURRENT_PASSWORD).setData(null, DBConstants.DISPLAY, DBConstants.INIT_MOVE);
            this.getScreenRecord().getField(UserScreenRecord.NEW_PASSWORD_1).setData(null, DBConstants.DISPLAY, DBConstants.INIT_MOVE);
            this.getScreenRecord().getField(UserScreenRecord.NEW_PASSWORD_2).setData(null, DBConstants.DISPLAY, DBConstants.INIT_MOVE);
        }
    }
    // By default, stay on whatever screen the superclass decided on.
    return screen;
}
public class DiameterStackMultiplexer { /** * = = = = = EventListener < Request , Answer > IMPLEMENTATION = = = = = */ @ Override public void receivedSuccessMessage ( Request request , Answer answer ) { } }
DiameterListener listener = findListener ( request ) ; if ( listener != null ) { listener . receivedSuccessMessage ( request , answer ) ; }
public class Bitstream { /** * Parses the data previously read with read _ frame _ data ( ) . */ void parse_frame ( ) { } }
// Convert Bytes read to int int b = 0 ; byte [ ] byteread = frame_bytes ; int bytesize = framesize ; // Check ID3v1 TAG ( True only if last frame ) . // for ( int t = 0 ; t < ( byteread . length ) - 2 ; t + + ) // if ( ( byteread [ t ] = = ' T ' ) & & ( byteread [ t + 1 ] = = ' A ' ) & & ( byteread [ t + 2 ] = = ' G ' ) ) // System . out . println ( " ID3v1 detected at offset " + t ) ; // throw newBitstreamException ( INVALIDFRAME , null ) ; for ( int k = 0 ; k < bytesize ; k = k + 4 ) { // int convert = 0; byte b0 = 0 ; byte b1 = 0 ; byte b2 = 0 ; byte b3 = 0 ; b0 = byteread [ k ] ; if ( k + 1 < bytesize ) b1 = byteread [ k + 1 ] ; if ( k + 2 < bytesize ) b2 = byteread [ k + 2 ] ; if ( k + 3 < bytesize ) b3 = byteread [ k + 3 ] ; framebuffer [ b ++ ] = ( ( b0 << 24 ) & 0xFF000000 ) | ( ( b1 << 16 ) & 0x00FF0000 ) | ( ( b2 << 8 ) & 0x0000FF00 ) | ( b3 & 0x000000FF ) ; } wordpointer = 0 ; bitindex = 0 ;
public class cudnnStatus { /** * Returns a string representation of the given constant * @ return A string representation of the given constant */ public static String stringFor ( int n ) { } }
switch ( n ) { case CUDNN_STATUS_SUCCESS : return "CUDNN_STATUS_SUCCESS" ; case CUDNN_STATUS_NOT_INITIALIZED : return "CUDNN_STATUS_NOT_INITIALIZED" ; case CUDNN_STATUS_ALLOC_FAILED : return "CUDNN_STATUS_ALLOC_FAILED" ; case CUDNN_STATUS_BAD_PARAM : return "CUDNN_STATUS_BAD_PARAM" ; case CUDNN_STATUS_INTERNAL_ERROR : return "CUDNN_STATUS_INTERNAL_ERROR" ; case CUDNN_STATUS_INVALID_VALUE : return "CUDNN_STATUS_INVALID_VALUE" ; case CUDNN_STATUS_ARCH_MISMATCH : return "CUDNN_STATUS_ARCH_MISMATCH" ; case CUDNN_STATUS_MAPPING_ERROR : return "CUDNN_STATUS_MAPPING_ERROR" ; case CUDNN_STATUS_EXECUTION_FAILED : return "CUDNN_STATUS_EXECUTION_FAILED" ; case CUDNN_STATUS_NOT_SUPPORTED : return "CUDNN_STATUS_NOT_SUPPORTED" ; case CUDNN_STATUS_LICENSE_ERROR : return "CUDNN_STATUS_LICENSE_ERROR" ; case CUDNN_STATUS_RUNTIME_PREREQUISITE_MISSING : return "CUDNN_STATUS_RUNTIME_PREREQUISITE_MISSING" ; case CUDNN_STATUS_RUNTIME_IN_PROGRESS : return "CUDNN_STATUS_RUNTIME_IN_PROGRESS" ; case CUDNN_STATUS_RUNTIME_FP_OVERFLOW : return "CUDNN_STATUS_RUNTIME_FP_OVERFLOW" ; } return "INVALID cudnnStatus: " + n ;
public class EvidenceManager { /** * エビデンスファイルに出力する文字列を構築します 。 * @ return エビデンスファイルに出力する文字列 */ private String build ( Evidence evidence ) { } }
VelocityContext context = new VelocityContext ( ) ; StringWriter writer = new StringWriter ( ) ; context . put ( "records" , evidence . getRecords ( ) ) ; context . put ( "caseNo" , evidence . getCaseNo ( ) ) ; context . put ( "testScriptName" , evidence . getScriptName ( ) ) ; context . put ( "result" , ( evidence . hasError ( ) ) ? "NG" : "" ) ; Future < ? > screenshotRezeFuture = evidence . getScreenshotResizeFuture ( ) ; if ( screenshotRezeFuture != null ) { try { screenshotRezeFuture . get ( 3 , TimeUnit . SECONDS ) ; } catch ( InterruptedException | ExecutionException | TimeoutException e ) { LOG . warn ( "screenshot.resize.wait.error" , e ) ; } } tmpl . merge ( context , writer ) ; return writer . toString ( ) ;
public class Director { /** * Get installed feature collections * @ return Map of feature name to InstalledFeatureCollection */ public Map < String , InstalledFeatureCollection > getInstalledFeatureCollections ( ) { } }
try { Map < String , InstalledFeatureCollection > installedFeatureCollections = new TreeMap < String , InstalledFeatureCollection > ( ) ; Map < String , ProvisioningFeatureDefinition > fdMap = product . getFeatureCollectionDefinitions ( ) ; for ( Entry < String , ProvisioningFeatureDefinition > entry : fdMap . entrySet ( ) ) { installedFeatureCollections . put ( entry . getKey ( ) , new InstalledAssetImpl ( entry . getValue ( ) ) ) ; } return installedFeatureCollections ; } catch ( FeatureToolException rte ) { log ( Level . FINEST , "Director.getInstalledFeatureCollections() got exception." , rte ) ; return null ; }
/**
 * Writes the data for a single resource assignment to an MSPDI file.
 *
 * @param mpx resource assignment data
 * @return new MSPDI assignment instance
 */
private Project.Assignments.Assignment writeAssignment(ResourceAssignment mpx)
{
   Project.Assignments.Assignment xml = m_factory.createProjectAssignmentsAssignment();

   // Cost and work actuals.
   xml.setActualCost(DatatypeConverter.printCurrency(mpx.getActualCost()));
   xml.setActualFinish(mpx.getActualFinish());
   xml.setActualOvertimeCost(DatatypeConverter.printCurrency(mpx.getActualOvertimeCost()));
   xml.setActualOvertimeWork(DatatypeConverter.printDuration(this, mpx.getActualOvertimeWork()));
   xml.setActualStart(mpx.getActualStart());
   xml.setActualWork(DatatypeConverter.printDuration(this, mpx.getActualWork()));

   // Earned-value fields.
   xml.setACWP(DatatypeConverter.printCurrency(mpx.getACWP()));
   xml.setBCWP(DatatypeConverter.printCurrency(mpx.getBCWP()));
   xml.setBCWS(DatatypeConverter.printCurrency(mpx.getBCWS()));

   // Budget and planned cost/work.
   xml.setBudgetCost(DatatypeConverter.printCurrency(mpx.getBudgetCost()));
   xml.setBudgetWork(DatatypeConverter.printDuration(this, mpx.getBudgetWork()));
   xml.setCost(DatatypeConverter.printCurrency(mpx.getCost()));

   // Only write the cost rate table index when it differs from the default (0).
   if (mpx.getCostRateTableIndex() != 0)
   {
      xml.setCostRateTable(BigInteger.valueOf(mpx.getCostRateTableIndex()));
   }

   xml.setCreationDate(mpx.getCreateDate());
   xml.setCV(DatatypeConverter.printCurrency(mpx.getCV()));
   xml.setDelay(DatatypeConverter.printDurationInIntegerTenthsOfMinutes(mpx.getDelay()));
   xml.setFinish(mpx.getFinish());
   xml.setGUID(mpx.getGUID());
   // Fixed rate units when no variable rate unit is set on the assignment.
   xml.setHasFixedRateUnits(Boolean.valueOf(mpx.getVariableRateUnits() == null));
   // Material resources are flagged as fixed material.
   xml.setFixedMaterial(Boolean.valueOf(mpx.getResource() != null && mpx.getResource().getType() == ResourceType.MATERIAL));
   xml.setHyperlink(mpx.getHyperlink());
   xml.setHyperlinkAddress(mpx.getHyperlinkAddress());
   xml.setHyperlinkSubAddress(mpx.getHyperlinkSubAddress());
   xml.setLevelingDelay(DatatypeConverter.printDurationInIntegerTenthsOfMinutes(mpx.getLevelingDelay()));
   xml.setLevelingDelayFormat(DatatypeConverter.printDurationTimeUnits(mpx.getLevelingDelay(), false));

   // Only emit a Notes element when there is note text.
   if (!mpx.getNotes().isEmpty())
   {
      xml.setNotes(mpx.getNotes());
   }

   xml.setOvertimeCost(DatatypeConverter.printCurrency(mpx.getOvertimeCost()));
   xml.setOvertimeWork(DatatypeConverter.printDuration(this, mpx.getOvertimeWork()));
   xml.setPercentWorkComplete(NumberHelper.getBigInteger(mpx.getPercentageWorkComplete()));
   // Rate scale only applies when the assignment uses variable rate units.
   xml.setRateScale(mpx.getVariableRateUnits() == null ? null : DatatypeConverter.printTimeUnit(mpx.getVariableRateUnits()));
   xml.setRegularWork(DatatypeConverter.printDuration(this, mpx.getRegularWork()));

   // Remaining cost/work fields.
   xml.setRemainingCost(DatatypeConverter.printCurrency(mpx.getRemainingCost()));
   xml.setRemainingOvertimeCost(DatatypeConverter.printCurrency(mpx.getRemainingOvertimeCost()));
   xml.setRemainingOvertimeWork(DatatypeConverter.printDuration(this, mpx.getRemainingOvertimeWork()));
   xml.setRemainingWork(DatatypeConverter.printDuration(this, mpx.getRemainingWork()));

   // Unassigned work uses the sentinel NULL_RESOURCE_ID rather than a real resource UID.
   xml.setResourceUID(mpx.getResource() == null ? BigInteger.valueOf(NULL_RESOURCE_ID.intValue()) : BigInteger.valueOf(NumberHelper.getInt(mpx.getResourceUniqueID())));
   xml.setResume(mpx.getResume());
   xml.setStart(mpx.getStart());
   xml.setStop(mpx.getStop());
   xml.setSV(DatatypeConverter.printCurrency(mpx.getSV()));
   xml.setTaskUID(NumberHelper.getBigInteger(mpx.getTask().getUniqueID()));
   xml.setUID(NumberHelper.getBigInteger(mpx.getUniqueID()));
   xml.setUnits(DatatypeConverter.printUnits(mpx.getUnits()));
   xml.setVAC(DatatypeConverter.printCurrency(mpx.getVAC()));
   xml.setWork(DatatypeConverter.printDuration(this, mpx.getWork()));
   xml.setWorkContour(mpx.getWorkContour());

   // Variance fields (note the differing units each converter encodes).
   xml.setCostVariance(DatatypeConverter.printCurrency(mpx.getCostVariance()));
   xml.setWorkVariance(DatatypeConverter.printDurationInDecimalThousandthsOfMinutes(mpx.getWorkVariance()));
   xml.setStartVariance(DatatypeConverter.printDurationInIntegerThousandthsOfMinutes(mpx.getStartVariance()));
   xml.setFinishVariance(DatatypeConverter.printDurationInIntegerThousandthsOfMinutes(mpx.getFinishVariance()));

   // Child data and extended attributes.
   writeAssignmentBaselines(xml, mpx);
   writeAssignmentExtendedAttributes(xml, mpx);
   writeAssignmentTimephasedData(mpx, xml);

   // Notify listeners that this assignment has been written.
   m_eventManager.fireAssignmentWrittenEvent(mpx);

   return (xml);
}
public class NonBlockingPushbackReader { /** * Reads a single character . * @ return The character read , or - 1 if the end of the stream has been reached * @ exception IOException * If an I / O error occurs */ @ Override public int read ( ) throws IOException { } }
_ensureOpen ( ) ; if ( m_nBufPos < m_aBuf . length ) return m_aBuf [ m_nBufPos ++ ] ; return super . read ( ) ;
/**
 * Initialize.
 *
 * @param itsLocation       the location of this component within the parent
 * @param parentScreen      the parent screen
 * @param fieldConverter    the field this screen field is linked to
 * @param iDisplayFieldDesc do I display the field desc?
 * @param properties        extra properties
 */
public void init(ScreenLocation itsLocation, BasePanel parentScreen, Converter fieldConverter, int iDisplayFieldDesc, Map<String, Object> properties)
{
    if (!(parentScreen instanceof BasePanel))
    {   // The parent is an SApplet (the view packages are not accessible from here).
        // Use the parent itself as the record owner, unless a Task is supplied in
        // the properties, in which case the Task takes over that role.
        m_recordOwnerParent = parentScreen;
        if (properties != null)
            if (properties.get(DBParams.TASK) instanceof Task)
                m_recordOwnerParent = (Task) properties.get(DBParams.TASK);
        if (m_recordOwnerParent != null)
            m_recordOwnerParent.addRecordOwner(this);
        if (properties != null)
        {
            // Copy all non-TASK properties through to the record owner.
            // NOTE(review): if m_recordOwnerParent is null here (null parentScreen
            // and no TASK property) this loop NPEs — presumably callers always
            // supply one or the other; confirm against call sites.
            for (String key : properties.keySet())
            {
                if (!DBParams.TASK.equals(key))
                    m_recordOwnerParent.setProperty(key, properties.get(key).toString());
            }
        }
        // Clear the parent so super.init() does not treat the SApplet as a BasePanel.
        parentScreen = null;
    }
    super.init(itsLocation, parentScreen, fieldConverter, iDisplayFieldDesc, properties);
}
public class LabsInner { /** * Create or replace an existing Lab . * @ param resourceGroupName The name of the resource group . * @ param labAccountName The name of the lab Account . * @ param labName The name of the lab . * @ param lab Represents a lab . * @ param serviceCallback the async ServiceCallback to handle successful and failed responses . * @ throws IllegalArgumentException thrown if parameters fail the validation * @ return the { @ link ServiceFuture } object */ public ServiceFuture < LabInner > createOrUpdateAsync ( String resourceGroupName , String labAccountName , String labName , LabInner lab , final ServiceCallback < LabInner > serviceCallback ) { } }
return ServiceFuture . fromResponse ( createOrUpdateWithServiceResponseAsync ( resourceGroupName , labAccountName , labName , lab ) , serviceCallback ) ;
public class GenJsCodeVisitorAssistantForMsgs { /** * Builds the googMsgVarName for an MsgNode . */ private String buildGoogMsgVarNameHelper ( MsgNode msgNode ) { } }
// NOTE : MSG _ UNNAMED / MSG _ EXTERNAL are a special tokens recognized by the jscompiler . MSG _ UNNAMED // disables the default logic that requires all messages to be uniquely named . // and MSG _ EXTERNAL causes the jscompiler to not extract these messages . String desiredName = jsSrcOptions . googMsgsAreExternal ( ) ? "MSG_EXTERNAL_" + MsgUtils . computeMsgIdForDualFormat ( msgNode ) : "MSG_UNNAMED" ; return translationContext . nameGenerator ( ) . generateName ( desiredName ) ;