signature
stringlengths
43
39.1k
implementation
stringlengths
0
450k
public class Descriptives { /** * Calculates Standard Error of Mean under SRS * @ param flatDataCollection * @ return */ public static double meanSE ( FlatDataCollection flatDataCollection ) { } }
double std = std ( flatDataCollection , true ) ; double meanSE = std / Math . sqrt ( count ( flatDataCollection ) ) ; return meanSE ;
public class DialogWrapper { /** * ( non - Javadoc ) * @ see javax . sip . Dialog # terminateOnBye ( boolean ) */ public void terminateOnBye ( boolean arg0 ) throws SipException { } }
this . terminateOnByeCached = arg0 ; if ( wrappedDialog != null ) { wrappedDialog . terminateOnBye ( arg0 ) ; }
public class JdbcRegistry { /** * Removes all of the api contracts from the database . * @ param client * @ param connection * @ throws SQLException */ protected void unregisterApiContracts ( Client client , Connection connection ) throws SQLException { } }
QueryRunner run = new QueryRunner ( ) ; run . update ( connection , "DELETE FROM contracts WHERE client_org_id = ? AND client_id = ? AND client_version = ?" , // $ NON - NLS - 1 $ client . getOrganizationId ( ) , client . getClientId ( ) , client . getVersion ( ) ) ;
public class VersionParser {
    /**
     * Checks for empty identifiers in the pre-release version or build metadata.
     *
     * @throws ParseException if the pre-release version or build metadata
     *         have empty identifier(s)
     */
    private void checkForEmptyIdentifier() {
        // An identifier is empty when the very next character already terminates
        // it: '.' (next identifier), '+' (start of build metadata) or end-of-line.
        Character la = chars.lookahead(1);
        if (CharType.DOT.isMatchedBy(la) || CharType.PLUS.isMatchedBy(la)
                || CharType.EOL.isMatchedBy(la)) {
            // Report what would have been legal here: a digit, letter or hyphen.
            throw new ParseException("Identifiers MUST NOT be empty",
                    new UnexpectedCharacterException(la, chars.currentOffset(),
                            CharType.DIGIT, CharType.LETTER, CharType.HYPHEN));
        }
    }
}
public class XCollectionLiteralImpl { /** * < ! - - begin - user - doc - - > * < ! - - end - user - doc - - > * @ generated */ @ Override public boolean eIsSet ( int featureID ) { } }
switch ( featureID ) { case XbasePackage . XCOLLECTION_LITERAL__ELEMENTS : return elements != null && ! elements . isEmpty ( ) ; } return super . eIsSet ( featureID ) ;
public class DefaultConnectionManager { /** * { @ inheritDoc } * @ see jp . co . future . uroborosql . connection . ConnectionManager # commit ( ) */ @ Override public void commit ( ) { } }
if ( conn != null ) { try { conn . commit ( ) ; } catch ( SQLException e ) { throw new UroborosqlSQLException ( e ) ; } }
public class PatternsImpl { /** * Returns an application version ' s patterns . * @ param appId The application ID . * @ param versionId The version ID . * @ param getPatternsOptionalParameter the object representing the optional parameters to be set before calling this API * @ param serviceCallback the async ServiceCallback to handle successful and failed responses . * @ throws IllegalArgumentException thrown if parameters fail the validation * @ return the { @ link ServiceFuture } object */ public ServiceFuture < List < PatternRuleInfo > > getPatternsAsync ( UUID appId , String versionId , GetPatternsOptionalParameter getPatternsOptionalParameter , final ServiceCallback < List < PatternRuleInfo > > serviceCallback ) { } }
return ServiceFuture . fromResponse ( getPatternsWithServiceResponseAsync ( appId , versionId , getPatternsOptionalParameter ) , serviceCallback ) ;
public class JDBCCallableStatement { /** * # ifdef JAVA6 */ public synchronized void setNCharacterStream ( String parameterName , Reader value ) throws SQLException { } }
super . setNCharacterStream ( findParameterIndex ( parameterName ) , value ) ;
public class Task { /** * Clones this task to a new one . If you don ' t specify a clone strategy through { @ link # TASK _ CLONER } the new task is * instantiated via reflection and { @ link # copyTo ( Task ) } is invoked . * @ return the cloned task * @ throws TaskCloneException if the task cannot be successfully cloned . */ @ SuppressWarnings ( "unchecked" ) public Task < E > cloneTask ( ) { } }
if ( TASK_CLONER != null ) { try { return TASK_CLONER . cloneTask ( this ) ; } catch ( Throwable t ) { throw new TaskCloneException ( t ) ; } } try { Task < E > clone = copyTo ( ClassReflection . newInstance ( this . getClass ( ) ) ) ; clone . guard = guard == null ? null : guard . cloneTask ( ) ; return clone ; } catch ( ReflectionException e ) { throw new TaskCloneException ( e ) ; }
public class HELM1Utils { /** * method has to be changed ! ! ! including smiles - > to generate canonical * representation ; this method has to be tested in further detail */ private static Map < String , String > findAdHocMonomers ( String elements , String type ) throws HELM1FormatException , ValidationException , ChemistryException , org . helm . notation2 . parser . exceptionparser . NotationException { } }
/* find adHocMonomers */ try { Map < String , String > listMatches = new HashMap < String , String > ( ) ; String [ ] listelements = elements . split ( "\\." ) ; if ( type == "RNA" ) { for ( String element : listelements ) { List < String > monomerIds ; monomerIds = NucleotideParser . getMonomerIDListFromNucleotide ( element ) ; for ( String id : monomerIds ) { Monomer monomer = MonomerFactory . getInstance ( ) . getMonomerStore ( ) . getMonomer ( type , id ) ; if ( monomer . isAdHocMonomer ( ) ) { listMatches . put ( element , "[" + monomer . getCanSMILES ( ) + "]" ) ; } } } } else { for ( String element : listelements ) { Monomer monomer = MonomerFactory . getInstance ( ) . getMonomerStore ( ) . getMonomer ( type , element . replace ( "[" , "" ) . replace ( "]" , "" ) ) ; try { if ( monomer . isAdHocMonomer ( ) ) { listMatches . put ( element , "[" + monomer . getCanSMILES ( ) + "]" ) ; } } catch ( NullPointerException e ) { if ( ! ( Chemistry . getInstance ( ) . getManipulator ( ) . validateSMILES ( element . substring ( 1 , element . length ( ) - 1 ) ) ) ) { e . printStackTrace ( ) ; throw new ValidationException ( "SMILES as Monomer is not valid :" + element ) ; } } } } return listMatches ; } catch ( MonomerLoadingException e ) { e . printStackTrace ( ) ; throw new HELM1FormatException ( e . getMessage ( ) ) ; }
public class AsmUtils { /** * Checks if two { @ link LdcInsnNode } are equals . * @ param insn1 the insn1 * @ param insn2 the insn2 * @ return true , if successful */ public static boolean ldcInsnEqual ( LdcInsnNode insn1 , LdcInsnNode insn2 ) { } }
if ( insn1 . cst . equals ( "~" ) || insn2 . cst . equals ( "~" ) ) return true ; return insn1 . cst . equals ( insn2 . cst ) ;
public class OutputProperties {
    /**
     * Searches for the list of qname properties with the specified key in
     * the property list. If the key is not found in this property list, the
     * default property list, and its defaults, recursively, are then checked.
     *
     * @param key the property key.
     * @param props the list of properties to search in.
     * @return the value in this property list as a vector of QNames, or
     *         <code>null</code> if the property is not found.
     */
    public static Vector getQNameProperties(String key, Properties props) {
        String s = props.getProperty(key);
        if (null != s) {
            Vector v = new Vector();
            int l = s.length();
            boolean inCurly = false; // true while scanning inside a '{...}' URI part
            FastStringBuffer buf = new FastStringBuffer();
            // parse through string, breaking on whitespaces. I do this instead
            // of a tokenizer so I can track whitespace inside of curly brackets,
            // which theoretically shouldn't happen if they contain legal URLs.
            for (int i = 0; i < l; i++) {
                char c = s.charAt(i);
                if (Character.isWhitespace(c)) {
                    // Whitespace outside curly braces terminates the current token.
                    if (!inCurly) {
                        if (buf.length() > 0) {
                            QName qname = QName.getQNameFromString(buf.toString());
                            v.addElement(qname);
                            buf.reset();
                        }
                        continue;
                    }
                } else if ('{' == c)
                    inCurly = true;
                else if ('}' == c)
                    inCurly = false;
                buf.append(c);
            }
            // Flush the final token, if any.
            if (buf.length() > 0) {
                QName qname = QName.getQNameFromString(buf.toString());
                v.addElement(qname);
                buf.reset();
            }
            return v;
        } else
            return null;
    }
}
public class FileWindow { /** * Selects a range of characters . */ public void select ( int start , int end ) { } }
int docEnd = textArea . getDocument ( ) . getLength ( ) ; textArea . select ( docEnd , docEnd ) ; textArea . select ( start , end ) ;
public class P4_InsertOrUpdateOp { /** * 判断对象是否有主键值 , 必须全部有才返回true */ private < T > boolean isWithKey ( T t , List < Field > fields ) { } }
if ( t == null || fields == null || fields . isEmpty ( ) ) { return false ; } List < Field > keyFields = DOInfoReader . getKeyColumns ( t . getClass ( ) ) ; if ( keyFields . isEmpty ( ) ) { return false ; } for ( Field keyField : keyFields ) { if ( DOInfoReader . getValue ( keyField , t ) == null ) { return false ; } } return true ;
public class authenticationtacacspolicy_authenticationvserver_binding { /** * Use this API to fetch authenticationtacacspolicy _ authenticationvserver _ binding resources of given name . */ public static authenticationtacacspolicy_authenticationvserver_binding [ ] get ( nitro_service service , String name ) throws Exception { } }
authenticationtacacspolicy_authenticationvserver_binding obj = new authenticationtacacspolicy_authenticationvserver_binding ( ) ; obj . set_name ( name ) ; authenticationtacacspolicy_authenticationvserver_binding response [ ] = ( authenticationtacacspolicy_authenticationvserver_binding [ ] ) obj . get_resources ( service ) ; return response ;
public class TableMetaReader {
    /**
     * Reads table metadata from the database.
     *
     * @return the list of table metadata
     */
    public List<TableMeta> read() {
        Connection con = JdbcUtil.getConnection(dataSource);
        try {
            DatabaseMetaData metaData = con.getMetaData();
            // Fall back to the dialect's default schema when none was configured.
            List<TableMeta> tableMetas = getTableMetas(metaData, schemaName != null ? schemaName : getDefaultSchemaName(metaData));
            for (TableMeta tableMeta : tableMetas) {
                Set<String> primaryKeySet = getPrimaryKeys(metaData, tableMeta);
                handleColumnMeta(metaData, tableMeta, primaryKeySet);
            }
            // Some drivers do not expose comments via JDBC metadata; read them
            // from the database's data dictionary instead.
            if (dialect.isJdbcCommentUnavailable()) {
                readCommentFromDictinary(con, tableMetas);
            }
            return tableMetas;
        } catch (SQLException e) {
            throw new GenException(Message.DOMAGEN9001, e, e);
        } finally {
            // Always release the connection, even on failure.
            JdbcUtil.close(con);
        }
    }
}
public class BiddableAdGroupCriterion { /** * Sets the firstPageCpc value for this BiddableAdGroupCriterion . * @ param firstPageCpc * First page Cpc for this criterion . * < span class = " constraint ReadOnly " > This field is * read only and will be ignored when sent to the API . < / span > */ public void setFirstPageCpc ( com . google . api . ads . adwords . axis . v201809 . cm . Bid firstPageCpc ) { } }
this . firstPageCpc = firstPageCpc ;
public class GlobusGSSManagerImpl {
    /**
     * Currently not implemented.
     *
     * @throws GSSException always, with major code {@link GSSException#UNAVAILABLE}.
     */
    public GSSName createName(byte name[], Oid nameType, Oid mech) throws GSSException {
        // Mechanism-specific name import is not supported by this manager.
        throw new GSSException(GSSException.UNAVAILABLE);
    }
}
public class TwiML {
    /**
     * Convert TwiML object to XML.
     *
     * @return XML string of TwiML object
     * @throws TwiMLException if cannot generate XML
     */
    public String toXml() throws TwiMLException {
        try {
            Document doc = DocumentBuilderFactory.newInstance().newDocumentBuilder().newDocument();
            // standalone=true keeps the XML declaration free of standalone="no".
            doc.setXmlStandalone(true);
            doc.appendChild(this.buildXmlElement(doc));
            Transformer transformer = TransformerFactory.newInstance().newTransformer();
            transformer.setOutputProperty(OutputKeys.INDENT, "no");
            DOMSource source = new DOMSource(doc);
            StreamResult output = new StreamResult(new StringWriter());
            transformer.transform(source, output);
            // trim() drops any trailing newline the transformer may append.
            return output.getWriter().toString().trim();
        } catch (TransformerException te) {
            // NOTE(review): only the message is preserved here, the original
            // cause/stack trace is dropped -- consider chaining 'te' if
            // TwiMLException supports a cause parameter (TODO confirm).
            throw new TwiMLException("Exception serializing TwiML: " + te.getMessage());
        } catch (Exception e) {
            throw new TwiMLException("Unhandled exception: " + e.getMessage());
        }
    }
}
public class ApplicationOperations { /** * Gets information about the specified application . * @ param applicationId The ID of the application to get . * @ param additionalBehaviors A collection of { @ link BatchClientBehavior } instances that are applied to the Batch service request . * @ return An { @ link ApplicationSummary } containing information about the specified application . * @ throws BatchErrorException Exception thrown when an error response is received from the Batch service . * @ throws IOException Exception thrown when there is an error in serialization / deserialization of data sent to / received from the Batch service . */ public ApplicationSummary getApplication ( String applicationId , Iterable < BatchClientBehavior > additionalBehaviors ) throws BatchErrorException , IOException { } }
ApplicationGetOptions options = new ApplicationGetOptions ( ) ; BehaviorManager bhMgr = new BehaviorManager ( this . customBehaviors ( ) , additionalBehaviors ) ; bhMgr . applyRequestBehaviors ( options ) ; return this . parentBatchClient . protocolLayer ( ) . applications ( ) . get ( applicationId , options ) ;
public class LongRunningJobManager {
    /**
     * End a job.
     *
     * @param sJobID
     *        The internal long running job ID created from
     *        {@link #onStartJob(ILongRunningJob, String)}.
     * @param eExecSucess
     *        Was the job execution successful or not from a technical point of
     *        view? May not be <code>null</code>. If a JobExecutionException was
     *        thrown, this should be {@link ESuccess#FAILURE}.
     * @param aResult
     *        The main job results.
     */
    public void onEndJob(@Nullable final String sJobID, @Nonnull final ESuccess eExecSucess, @Nonnull final LongRunningJobResult aResult) {
        ValueEnforcer.notNull(eExecSucess, "ExecSuccess");
        ValueEnforcer.notNull(aResult, "Result");
        // Remove from running job list; an unknown ID is a caller error.
        final LongRunningJobData aJobData = m_aRWLock.writeLocked(() -> {
            final LongRunningJobData ret = m_aRunningJobs.remove(sJobID);
            if (ret == null)
                throw new IllegalArgumentException("Illegal job ID '" + sJobID + "' passed!");
            // End the job - inside the writeLock so removal and completion are atomic
            ret.onJobEnd(eExecSucess, aResult);
            return ret;
        });
        // Remember it in the result manager (outside the lock)
        m_aResultMgr.addResult(aJobData);
    }
}
public class MediaTypes { /** * Ctor . * @ param text Text to parse * @ return List of media types */ @ SuppressWarnings ( "PMD.AvoidInstantiatingObjectsInLoops" ) private static SortedSet < MediaType > parse ( final String text ) { } }
final SortedSet < MediaType > list = new TreeSet < > ( ) ; for ( final String name : new EnglishLowerCase ( text ) . string ( ) . split ( "," ) ) { if ( ! name . isEmpty ( ) ) { list . add ( new MediaType ( name ) ) ; } } return list ;
public class InteropFramework {
    /**
     * Reads a document from a file, using the format to decide which parser to
     * read the file with.
     *
     * @param filename the file to read a document from
     * @param format the format of the file
     * @return a Document
     */
    public Document readDocumentFromFile(String filename, ProvFormat format) {
        try {
            switch (format) {
                // Image/graph formats are write-only targets; PROV cannot be
                // loaded back from them.
                case DOT:
                case JPEG:
                case PNG:
                case SVG:
                    throw new UnsupportedOperationException(); // we don't load PROV
                                                               // from these
                                                               // formats
                case JSON: {
                    return new org.openprovenance.prov.json.Converter(pFactory).readDocument(filename);
                }
                case PROVN: {
                    // PROV-N: parse to an ANTLR tree, then convert to beans.
                    Utility u = new Utility();
                    CommonTree tree = u.convertASNToTree(filename);
                    Object o = u.convertTreeToJavaBean(tree, pFactory);
                    Document doc = (Document) o;
                    // Namespace ns = Namespace.gatherNamespaces(doc);
                    // doc.setNamespace(ns);
                    return doc;
                }
                case RDFXML:
                case TRIG:
                case TURTLE: {
                    // All RDF serializations share one parser entry point.
                    org.openprovenance.prov.rdf.Utility rdfU = new org.openprovenance.prov.rdf.Utility(pFactory, onto);
                    Document doc = rdfU.parseRDF(filename);
                    return doc;
                }
                case XML: {
                    File in = new File(filename);
                    ProvDeserialiser deserial = ProvDeserialiser.getThreadProvDeserialiser();
                    Document doc = deserial.deserialiseDocument(in);
                    return doc;
                }
                default: {
                    System.out.println("Unknown format " + filename);
                    throw new UnsupportedOperationException();
                }
            }
        // Parser-specific checked exceptions are all wrapped in the single
        // runtime InteropException so callers see one failure type.
        } catch (IOException e) {
            throw new InteropException(e);
        } catch (RDFParseException e) {
            throw new InteropException(e);
        } catch (RDFHandlerException e) {
            throw new InteropException(e);
        } catch (JAXBException e) {
            throw new InteropException(e);
        } catch (RecognitionException e) {
            throw new InteropException(e);
        }
    }
}
public class Caster {
    /**
     * cast a Object to a int value (primitive value type)
     *
     * @param o Object to cast
     * @return casted int value
     * @throws PageException if the object cannot be converted to a number
     */
    public static int toIntValue(Object o) throws PageException {
        // Branch order matters: more specific / cheaper checks come first.
        if (o instanceof Number) return ((Number) o).intValue();
        else if (o instanceof Boolean) return ((Boolean) o).booleanValue() ? 1 : 0;
        // Character sequences are trimmed and re-dispatched (presumably to a
        // String overload of toIntValue -- TODO confirm it exists in this class).
        else if (o instanceof CharSequence) return toIntValue(o.toString().trim());
        else if (o instanceof Character) return (int) (((Character) o).charValue());
        // else if(o instanceof Clob) return toIntValue(toString(o));
        else if (o instanceof Castable) return (int) ((Castable) o).castToDoubleValue();
        else if (o instanceof Date) return (int) new DateTimeImpl((Date) o).castToDoubleValue();
        // NOTE(review): String implements CharSequence, so this branch looks
        // unreachable unless the CharSequence branch dispatches to an overload.
        if (o instanceof String) throw new ExpressionException("Can't cast String [" + CasterException.crop(o) + "] to a number");
        else if (o instanceof ObjectWrap) return toIntValue(((ObjectWrap) o).getEmbededObject());
        throw new CasterException(o, "number");
    }
}
public class Route { /** * Return { @ code true } if all route properties are { @ code null } or empty . * @ return { @ code true } if all route properties are { @ code null } or empty */ public boolean isEmpty ( ) { } }
return _name == null && _comment == null && _description == null && _source == null && _links . isEmpty ( ) && _number == null && _points . isEmpty ( ) ;
public class MergeRequestApi { /** * Get all merge requests matching the filter . * < pre > < code > GitLab Endpoint : GET / merge _ requests < / code > < / pre > * @ param filter a MergeRequestFilter instance with the filter settings * @ return all merge requests for the specified project matching the filter * @ throws GitLabApiException if any exception occurs */ public List < MergeRequest > getMergeRequests ( MergeRequestFilter filter ) throws GitLabApiException { } }
return ( getMergeRequests ( filter , getDefaultPerPage ( ) ) . all ( ) ) ;
public class NotifdEventConsumer {
    /**
     * Connects to the notification daemon (notifd) event channel described by
     * the given import info and obtains a structured push supplier from it.
     *
     * @param received the imported channel info (IOR and export state)
     * @throws DevFailed if the channel, consumer admin or push supplier
     *         cannot be obtained
     */
    private void connectToNotificationDaemon(DbEventImportInfo received) throws DevFailed {
        boolean channel_exported = received.channel_exported;
        if (channel_exported) {
            // Resolve the channel from its stringified IOR.
            org.omg.CORBA.Object event_channel_obj = orb.string_to_object(received.channel_ior);
            try {
                eventChannel = EventChannelHelper.narrow(event_channel_obj);
                // Set timeout on eventChannel
                final org.omg.CORBA.Policy p = new org.jacorb.orb.policies.RelativeRoundtripTimeoutPolicy(10000 * 3000);
                eventChannel._set_policy_override(new Policy[]{p}, org.omg.CORBA.SetOverrideType.ADD_OVERRIDE);
            } catch (RuntimeException e) {
                Except.throw_event_system_failed("API_NotificationServiceFailed",
                        "Failed to connect notification daemon (hint : make sure the notifd daemon is running on this host",
                        "EventConsumer.connect_event_channel");
            }
            if (eventChannel == null) {
                channel_exported = false;
            }
        }
        if (!channel_exported) {
            Except.throw_event_system_failed("API_NotificationServiceFailed",
                    "Failed to narrow EventChannel from notification daemon (hint : make sure the notifd daemon is running on this host",
                    "EventConsumer.connect_event_channel");
        }
        // Obtain a consumer admin: we'll use the channel's default consumer admin
        try {
            consumerAdmin = eventChannel.default_consumer_admin();
        } catch (Exception e) {
            Except.throw_event_system_failed("API_NotificationServiceFailed",
                    "Received " + e.toString() + "\nduring eventChannel.default_consumer_admin() call",
                    "EventConsumer.connect_event_channel");
        }
        if (consumerAdmin == null) {
            Except.throw_event_system_failed("API_NotificationServiceFailed",
                    "Failed to get default consumer admin from notification daemon (hint : make sure the notifd daemon is running on this host",
                    "EventConsumer.connect_event_channel");
        }
        // Obtain a ProxySupplier: we are using Push model and Structured data
        org.omg.CORBA.IntHolder pId = new org.omg.CORBA.IntHolder();
        try {
            proxySupplier = consumerAdmin.obtain_notification_push_supplier(ClientType.STRUCTURED_EVENT, pId);
            if (proxySupplier == null) {
                Except.throw_event_system_failed("API_NotificationServiceFailed",
                        "Failed to get a push supplier from notification daemon (hint : make sure the notifd daemon is running on this host",
                        "EventConsumer.connect_event_channel");
            }
        } catch (org.omg.CORBA.TIMEOUT ex) {
            Except.throw_event_system_failed("API_NotificationServiceFailed",
                    "Failed to get a push supplier due to a Timeout",
                    "EventConsumer.connect_event_channel");
        } catch (org.omg.CosNotifyChannelAdmin.AdminLimitExceeded ex) {
            Except.throw_event_system_failed("API_NotificationServiceFailed",
                    "Failed to get a push supplier due to AdminLimitExceeded (hint : make sure the notifd daemon is running on this host",
                    "EventConsumer.connect_event_channel");
        }
    }
}
public class H2O { /** * Print help about command line arguments . */ public static void printHelp ( ) { } }
String defaultFlowDirMessage ; if ( DEFAULT_FLOW_DIR ( ) == null ) { // If you start h2o on Hadoop , you must set - flow _ dir . // H2O doesn ' t know how to guess a good one . // user . home doesn ' t make sense . defaultFlowDirMessage = " (The default is none; saving flows not available.)\n" ; } else { defaultFlowDirMessage = " (The default is '" + DEFAULT_FLOW_DIR ( ) + "'.)\n" ; } String s = "\n" + "Usage: java [-Xmx<size>] -jar h2o.jar [options]\n" + " (Note that every option has a default and is optional.)\n" + "\n" + " -h | -help\n" + " Print this help.\n" + "\n" + " -version\n" + " Print version info and exit.\n" + "\n" + " -name <h2oCloudName>\n" + " Cloud name used for discovery of other nodes.\n" + " Nodes with the same cloud name will form an H2O cloud\n" + " (also known as an H2O cluster).\n" + "\n" + " -flatfile <flatFileName>\n" + " Configuration file explicitly listing H2O cloud node members.\n" + "\n" + " -ip <ipAddressOfNode>\n" + " IP address of this node.\n" + "\n" + " -port <port>\n" + " Port number for this node (note: port+1 is also used by default).\n" + " (The default port is " + ARGS . port + ".)\n" + "\n" + " -network <IPv4network1Specification>[,<IPv4network2Specification> ...]\n" + " The IP address discovery code will bind to the first interface\n" + " that matches one of the networks in the comma-separated list.\n" + " Use instead of -ip when a broad range of addresses is legal.\n" + " (Example network specification: '10.1.2.0/24' allows 256 legal\n" + " possibilities.)\n" + "\n" + " -ice_root <fileSystemPath>\n" + " The directory where H2O spills temporary data to disk.\n" + "\n" + " -log_dir <fileSystemPath>\n" + " The directory where H2O writes logs to disk.\n" + " (This usually has a good default that you need not change.)\n" + "\n" + " -log_level <TRACE,DEBUG,INFO,WARN,ERRR,FATAL>\n" + " Write messages at this logging level, or above. Default is INFO." 
+ "\n" + "\n" + " -flow_dir <server side directory or HDFS directory>\n" + " The directory where H2O stores saved flows.\n" + defaultFlowDirMessage + "\n" + " -nthreads <#threads>\n" + " Maximum number of threads in the low priority batch-work queue.\n" + " (The default is " + ( char ) Runtime . getRuntime ( ) . availableProcessors ( ) + ".)\n" + "\n" + " -client\n" + " Launch H2O node in client mode.\n" + "\n" + " -notify_local <fileSystemPath>" + " Specifies a file to write when the node is up. The file contains one line with the IP and" + " port of the embedded web server. e.g. 192.168.1.100:54321" + "\n" + " -context_path <context_path>\n" + " The context path for jetty.\n" + "\n" + "Authentication options:\n" + "\n" + " -jks <filename>\n" + " Java keystore file\n" + "\n" + " -jks_pass <password>\n" + " (Default is '" + DEFAULT_JKS_PASS + "')\n" + "\n" + " -hash_login\n" + " Use Jetty HashLoginService\n" + "\n" + " -ldap_login\n" + " Use Jetty Ldap login module\n" + "\n" + " -kerberos_login\n" + " Use Jetty Kerberos login module\n" + "\n" + " -spnego_login\n" + " Use Jetty SPNEGO login service\n" + "\n" + " -pam_login\n" + " Use Jetty PAM login module\n" + "\n" + " -login_conf <filename>\n" + " LoginService configuration file\n" + "\n" + " -spnego_properties <filename>\n" + " SPNEGO login module configuration file\n" + "\n" + " -form_auth\n" + " Enables Form-based authentication for Flow (default is Basic authentication)\n" + "\n" + " -session_timeout <minutes>\n" + " Specifies the number of minutes that a session can remain idle before the server invalidates\n" + " the session and requests a new login. Requires '-form_auth'. 
Default is no timeout\n" + "\n" + " -internal_security_conf <filename>\n" + " Path (absolute or relative) to a file containing all internal security related configurations\n" + "\n" + "Cloud formation behavior:\n" + "\n" + " New H2O nodes join together to form a cloud at startup time.\n" + " Once a cloud is given work to perform, it locks out new members\n" + " from joining.\n" + "\n" + "Examples:\n" + "\n" + " Start an H2O node with 4GB of memory and a default cloud name:\n" + " $ java -Xmx4g -jar h2o.jar\n" + "\n" + " Start an H2O node with 6GB of memory and a specify the cloud name:\n" + " $ java -Xmx6g -jar h2o.jar -name MyCloud\n" + "\n" + " Start an H2O cloud with three 2GB nodes and a default cloud name:\n" + " $ java -Xmx2g -jar h2o.jar &\n" + " $ java -Xmx2g -jar h2o.jar &\n" + " $ java -Xmx2g -jar h2o.jar &\n" + "\n" ; System . out . print ( s ) ; for ( AbstractH2OExtension e : extManager . getCoreExtensions ( ) ) { e . printHelp ( ) ; }
public class DatastoreSnippets { /** * [ VARIABLE " my _ key _ name2 " ] */ public void batchAddEntities ( String keyName1 , String keyName2 ) { } }
// [ START batchAddEntities ] Key key1 = datastore . newKeyFactory ( ) . setKind ( "MyKind" ) . newKey ( keyName1 ) ; Entity . Builder entityBuilder1 = Entity . newBuilder ( key1 ) ; entityBuilder1 . set ( "propertyName" , "value1" ) ; Entity entity1 = entityBuilder1 . build ( ) ; Key key2 = datastore . newKeyFactory ( ) . setKind ( "MyKind" ) . newKey ( keyName2 ) ; Entity . Builder entityBuilder2 = Entity . newBuilder ( key2 ) ; entityBuilder2 . set ( "propertyName" , "value2" ) ; Entity entity2 = entityBuilder2 . build ( ) ; try { datastore . add ( entity1 , entity2 ) ; } catch ( DatastoreException ex ) { if ( "ALREADY_EXISTS" . equals ( ex . getReason ( ) ) ) { // at least one of entity1 . getKey ( ) and entity2 . getKey ( ) already exists } } // [ END batchAddEntities ]
public class BeanReferenceInspector { /** * Reserves to bean reference inspection . * @ param beanId the bean id * @ param beanClass the bean class * @ param referenceable the object to be inspected * @ param ruleAppender the rule appender */ public void reserve ( String beanId , Class < ? > beanClass , BeanReferenceable referenceable , RuleAppender ruleAppender ) { } }
RefererKey key = new RefererKey ( beanClass , beanId ) ; Set < RefererInfo > refererInfoSet = refererInfoMap . get ( key ) ; if ( refererInfoSet == null ) { refererInfoSet = new LinkedHashSet < > ( ) ; refererInfoSet . add ( new RefererInfo ( referenceable , ruleAppender ) ) ; refererInfoMap . put ( key , refererInfoSet ) ; } else { refererInfoSet . add ( new RefererInfo ( referenceable , ruleAppender ) ) ; }
public class Names { /** * Subscription names must be lowercase ASCII strings . between 1 and 255 characters in length . Whitespace , ISO * control characters and certain punctuation characters that aren ' t generally allowed in file names or in * elasticsearch index names are excluded ( elasticsearch appears to allow : ! $ % & ( ) + - . : ; = @ [ ] ^ _ ` { } ~ ) . Subscription * names may not begin with a single underscore to allow URL space for extensions such as " / _ subscription / . . . " . * Queue names may not look like relative paths , ie . " . " or " . . " . */ public static boolean isLegalSubscriptionName ( String subscription ) { } }
return subscription != null && subscription . length ( ) > 0 && subscription . length ( ) <= 255 && ! ( subscription . charAt ( 0 ) == '_' && ! subscription . startsWith ( "__" ) ) && ! ( subscription . charAt ( 0 ) == '.' && ( "." . equals ( subscription ) || ".." . equals ( subscription ) ) ) && SUBSCRIPTION_NAME_ALLOWED . matchesAllOf ( subscription ) ;
public class ControllersInner {
    /**
     * Lists connection details for an Azure Dev Spaces Controller.
     * Lists connection details for the underlying container resources of an Azure Dev Spaces Controller.
     *
     * @param resourceGroupName Resource group to which the resource belongs.
     * @param name Name of the resource.
     * @throws IllegalArgumentException thrown if parameters fail the validation
     * @return the observable to the ControllerConnectionDetailsListInner object
     */
    public Observable<ServiceResponse<ControllerConnectionDetailsListInner>> listConnectionDetailsWithServiceResponseAsync(String resourceGroupName, String name) {
        // Validate required client state and parameters before issuing the request.
        if (this.client.subscriptionId() == null) {
            throw new IllegalArgumentException("Parameter this.client.subscriptionId() is required and cannot be null.");
        }
        if (resourceGroupName == null) {
            throw new IllegalArgumentException("Parameter resourceGroupName is required and cannot be null.");
        }
        if (name == null) {
            throw new IllegalArgumentException("Parameter name is required and cannot be null.");
        }
        if (this.client.apiVersion() == null) {
            throw new IllegalArgumentException("Parameter this.client.apiVersion() is required and cannot be null.");
        }
        // Issue the REST call and unwrap the raw retrofit response into the
        // typed service response; delegate errors to the observable chain.
        return service.listConnectionDetails(this.client.subscriptionId(), resourceGroupName, name, this.client.apiVersion(), this.client.acceptLanguage(), this.client.userAgent())
                .flatMap(new Func1<Response<ResponseBody>, Observable<ServiceResponse<ControllerConnectionDetailsListInner>>>() {
                    @Override
                    public Observable<ServiceResponse<ControllerConnectionDetailsListInner>> call(Response<ResponseBody> response) {
                        try {
                            ServiceResponse<ControllerConnectionDetailsListInner> clientResponse = listConnectionDetailsDelegate(response);
                            return Observable.just(clientResponse);
                        } catch (Throwable t) {
                            return Observable.error(t);
                        }
                    }
                });
    }
}
public class CountryCodes { /** * Returns the IBAN length for a given country code . * @ param countryCode a non - null , uppercase , two - character country code . * @ return the IBAN length for the given country , or - 1 if the input is not a known , two - character country code . * @ throws NullPointerException if the input is null . */ public static int getLengthForCountryCode ( String countryCode ) { } }
int index = indexOf ( countryCode ) ; if ( index > - 1 ) { return CountryCodes . COUNTRY_IBAN_LENGTHS [ index ] & REMOVE_SEPA_MASK ; } return - 1 ;
public class JideApplicationPage { /** * This sets the visible flag on all show view commands that * are registered with the command manager . If the page contains * the view the command is visible , otherwise not . The registration * of the show view command with the command manager is the * responsibility of the view descriptor . */ public void updateShowViewCommands ( ) { } }
ViewDescriptorRegistry viewDescriptorRegistry = ValkyrieRepository . getInstance ( ) . getApplicationConfig ( ) . viewDescriptorRegistry ( ) ; ViewDescriptor [ ] views = viewDescriptorRegistry . getViewDescriptors ( ) ; for ( ViewDescriptor view : views ) { String id = view . getId ( ) ; CommandManager commandManager = window . getCommandManager ( ) ; /* if ( commandManager . containsActionCommand ( id ) ) { ActionCommand command = commandManager . getActionCommand ( id ) ; command . setVisible ( pageViews . contains ( views [ i ] . getId ( ) ) ) ; */ if ( commandManager . isTypeMatch ( id , ActionCommand . class ) ) { ActionCommand command = ( ActionCommand ) commandManager . getCommand ( id , ActionCommand . class ) ; command . setVisible ( pageViews . contains ( view . getId ( ) ) ) ; } }
public class BigRational { /** * Returns the largest of the specified rational numbers . * @ param values the rational numbers to compare * @ return the largest rational number , 0 if no numbers are specified * @ see # max ( BigRational ) */ public static BigRational max ( BigRational ... values ) { } }
if ( values . length == 0 ) { return BigRational . ZERO ; } BigRational result = values [ 0 ] ; for ( int i = 1 ; i < values . length ; i ++ ) { result = result . max ( values [ i ] ) ; } return result ;
public class EmbeddedNeo4jEntityQueries { /** * Creates the node corresponding to an entity . * @ param executionEngine the { @ link GraphDatabaseService } used to run the query * @ param columnValues the values in { @ link org . hibernate . ogm . model . key . spi . EntityKey # getColumnValues ( ) } * @ return the corresponding node */ public Node insertEntity ( GraphDatabaseService executionEngine , Object [ ] columnValues ) { } }
Map < String , Object > params = params ( columnValues ) ; Result result = executionEngine . execute ( getCreateEntityQuery ( ) , params ) ; return singleResult ( result ) ;
public class AbstractIntSet { /** * { @ inheritDoc } */ @ Override public int compareTo ( IntSet o ) { } }
IntIterator thisIterator = this . descendingIterator ( ) ; IntIterator otherIterator = o . descendingIterator ( ) ; while ( thisIterator . hasNext ( ) && otherIterator . hasNext ( ) ) { int thisItem = thisIterator . next ( ) ; int otherItem = otherIterator . next ( ) ; if ( thisItem < otherItem ) return - 1 ; if ( thisItem > otherItem ) return 1 ; } return thisIterator . hasNext ( ) ? 1 : ( otherIterator . hasNext ( ) ? - 1 : 0 ) ;
public class JsHdrsImpl { /** * Get the value of the Priority field from the message header . * Javadoc description supplied by SIBusMessage interface . */ public final Integer getPriority ( ) { } }
// If the transient is not set , get the int value from the message and cache it . if ( cachedPriority == null ) { cachedPriority = ( Integer ) getHdr2 ( ) . getField ( JsHdr2Access . PRIORITY ) ; } // Return the ( possibly newly ) cached value return cachedPriority ;
public class Case2Cases { /** * Matches a case class of two elements . * < p > If matched , the { @ code b } value is decomposed to 0. */ public static < T extends Case2 < A , B > , A , B , EB extends B > DecomposableMatchBuilder0 < T > case2 ( Class < T > clazz , MatchesExact < A > a , DecomposableMatchBuilder0 < EB > b ) { } }
List < Matcher < Object > > matchers = Lists . of ( ArgumentMatchers . eq ( a . t ) , ArgumentMatchers . any ( ) ) ; return new DecomposableMatchBuilder1 < T , EB > ( matchers , 1 , new Case2FieldExtractor < > ( clazz ) ) . decomposeFirst ( b ) ;
public class JSRemoteConsumerPoint {
    /**
     * Returns whether the given destination is the one served by the supplied
     * consumer dispatcher, using identity comparison.
     *
     * @see com.ibm.ws.sib.processor.impl.interfaces.ConsumerPoint#destinationMatches(com.ibm.ws.sib.processor.impl.interfaces.DestinationHandler)
     */
    public boolean destinationMatches(DestinationHandler destinationHandlerToCompare, JSConsumerManager consumerDispatcher) {
        if (TraceComponent.isAnyTracingEnabled() && tc.isEntryEnabled())
            SibTr.entry(tc, "destinationMatches", new Object[] { destinationHandlerToCompare, consumerDispatcher });
        // For remote get, at the destination localising ME, the destination attached
        // to is never an alias as the alias resolution is done at the getting ME,
        // so reference equality (not equals()) is the intended comparison here.
        boolean matches = (consumerDispatcher.getDestination() == destinationHandlerToCompare);
        if (TraceComponent.isAnyTracingEnabled() && tc.isEntryEnabled())
            SibTr.exit(tc, "destinationMatches", Boolean.valueOf(matches));
        return matches;
    }
}
public class JSConsumerSet {
    /**
     * Commit the adding of the active message (remember, this is nothing to
     * do with the committing of the delete of the message).
     * We will have held the maxActiveMessagePrepareLock since the prepare
     * of the add until this method (or a rollback); this method releases
     * whichever (read or write) side of that lock is held.
     */
    public void commitAddActiveMessage() {
        if (TraceComponent.isAnyTracingEnabled() && tc.isEntryEnabled())
            SibTr.entry(this, tc, "commitAddActiveMessage");
        // Lock the active message counter lock
        synchronized (maxActiveMessageLock) {
            // Move the prepared add to the real count
            currentActiveMessages++;
            preparedActiveMessages--;
            if (TraceComponent.isAnyTracingEnabled() && tc.isDebugEnabled())
                SibTr.debug(tc, "Active Messages: current: " + currentActiveMessages + ", prepared: " + preparedActiveMessages + ", maximum: " + maxActiveMessages + " (suspended: " + consumerSetSuspended + ")");
            // We should never go negative, if we do something has gone wrong - FFDC
            // (processing continues: the counters are accounting state, not fatal)
            if (preparedActiveMessages < 0) {
                SIErrorException e = new SIErrorException(nls.getFormattedMessage("INTERNAL_MESSAGING_ERROR_CWSIP0001", new Object[] { "com.ibm.ws.sib.processor.impl.JSConsumerSet.commitAddActiveMessage", "1:499:1.16", Integer.valueOf(preparedActiveMessages), Integer.valueOf(currentActiveMessages), Integer.valueOf(maxActiveMessages), Boolean.valueOf(consumerSetSuspended) }, null));
                FFDCFilter.processException(e, "com.ibm.ws.sib.processor.impl.JSConsumerSet.commitAddActiveMessage", "1:510:1.16", this);
                SibTr.exception(tc, e);
                SibTr.error(tc, "INTERNAL_MESSAGING_ERROR_CWSIP0001", new Object[] { "com.ibm.ws.sib.processor.impl.JSConsumerSet.commitAddActiveMessage", "1:517:1.16" });
            }
            // If we currently hold the write lock (as we will always either hold
            // the read lock or the write lock we know this check will return the right
            // answer (i.e. it can't change under us)) we must have thought this add
            // would take us up to the limit and therefore require a suspend.
            if (maxActiveMessagePrepareLock.isWriteLockedByCurrentThread()) {
                // If the limit has been reached, suspend the set
                if (currentActiveMessages == maxActiveMessages) {
                    // All this does is mark the set as suspended. We leave it up to the members
                    // of the set to notice that the set is suspended and suspend themselves.
                    // This is more efficient than going round and suspending everyone, only to
                    // have to resume them all again in a second. Hopefully the set will be
                    // resumed before most of them notice it was suspended.
                    consumerSetSuspended = true;
                    if (TraceComponent.isAnyTracingEnabled() && tc.isDebugEnabled())
                        SibTr.debug(tc, "JSConsumerSet suspended " + this);
                }
                // Release the write lock
                maxActiveMessagePrepareWriteLock.unlock();
                if (TraceComponent.isAnyTracingEnabled() && tc.isDebugEnabled())
                    SibTr.debug(tc, "maxActiveMessagePrepareWriteLock.unlock(): " + maxActiveMessagePrepareLock);
            } else {
                // We only held a read lock, release it now
                maxActiveMessagePrepareReadLock.unlock();
                if (TraceComponent.isAnyTracingEnabled() && tc.isDebugEnabled())
                    SibTr.debug(tc, "maxActiveMessagePrepareReadLock.unlock(): " + maxActiveMessagePrepareLock);
            }
        }
        if (TraceComponent.isAnyTracingEnabled() && tc.isEntryEnabled())
            SibTr.exit(this, tc, "commitAddActiveMessage");
    }
}
public class Options {
    /**
     * Add an option that only contains a short name.
     * The option does not take an argument.
     *
     * @param opt Short single-character name of the option.
     * @param description Self-documenting description
     * @return the resulting Options instance
     * @since 1.3
     */
    public Options addOption(String opt, String description) {
        // Delegate to the long-form overload: no long name, no argument.
        addOption(opt, null, false, description);
        return this;
    }
}
public class SecuritySchemeValidator {
    /**
     * {@inheritDoc}
     * <p>
     * Validates an OpenAPI Security Scheme object: a $ref is validated purely
     * as a reference; otherwise 'type' is required, the type-specific required
     * fields are checked, and warnings are issued for fields that do not apply
     * to the declared type.
     */
    @Override
    public void validate(ValidationHelper helper, Context context, String key, SecurityScheme t) {
        // Reference form: validate the $ref itself and skip inline validation.
        String reference = t.getRef();
        if (reference != null && !reference.isEmpty()) {
            ValidatorUtils.referenceValidatorHelper(reference, t, helper, context, key);
            return;
        }
        // 'type' is mandatory; all remaining checks depend on its value.
        Optional<ValidationEvent> op_type = ValidatorUtils.validateRequiredField(t.getType(), context, "type");
        if (op_type.isPresent()) {
            op_type.ifPresent(helper::addValidationEvent);
        } else {
            String type = t.getType().toString();
            if ("apiKey".equals(type)) {
                // apiKey requires 'name' and 'in'; 'in' must be query/header/cookie.
                ValidatorUtils.validateRequiredField(t.getName(), context, "name").ifPresent(helper::addValidationEvent);
                Optional<ValidationEvent> op_in = ValidatorUtils.validateRequiredField(t.getIn(), context, "in");
                if (op_in.isPresent()) {
                    op_in.ifPresent(helper::addValidationEvent);
                } else {
                    Set<String> inValues = new HashSet<String>(Arrays.asList("query", "header", "cookie"));
                    if (!(inValues.contains(t.getIn().toString()))) {
                        final String message = Tr.formatMessage(tc, "securitySchemeInFieldInvalid", key, t.getIn().toString());
                        helper.addValidationEvent(new ValidationEvent(ValidationEvent.Severity.ERROR, context.getLocation(), message));
                    }
                }
            } else if ("http".equals(type)) {
                // http requires 'scheme'.
                ValidatorUtils.validateRequiredField(t.getScheme(), context, "scheme").ifPresent(helper::addValidationEvent);
            } else if ("oauth2".equals(type)) {
                // oauth2 requires 'flows'.
                ValidatorUtils.validateRequiredField(t.getFlows(), context, "flows").ifPresent(helper::addValidationEvent);
            } else if ("openIdConnect".equals(type)) {
                // openIdConnect requires a syntactically valid 'openIdConnectUrl'.
                Optional<ValidationEvent> op_url = ValidatorUtils.validateRequiredField(t.getOpenIdConnectUrl(), context, "openIdConnectUrl");
                if (op_url.isPresent()) {
                    op_url.ifPresent(helper::addValidationEvent);
                } else {
                    if (!(ValidatorUtils.isValidURI(t.getOpenIdConnectUrl()))) {
                        final String message = Tr.formatMessage(tc, "securitySchemeInvalidURL", t.getOpenIdConnectUrl());
                        helper.addValidationEvent(new ValidationEvent(ValidationEvent.Severity.ERROR, context.getLocation(), message));
                    }
                }
            }
            // Issue warnings for non-applicable fields
            // 'bearerFormat' field is only applicable to 'http' type
            if (t.getBearerFormat() != null && !t.getBearerFormat().isEmpty() && !"http".equals(type)) {
                final String message = Tr.formatMessage(tc, "nonApplicableFieldWithValue", "bearerFormat", t.getBearerFormat(), "Security Scheme Object", type);
                helper.addValidationEvent(new ValidationEvent(ValidationEvent.Severity.WARNING, context.getLocation(), message));
            }
            // 'scheme' field is only applicable to 'http' type
            if (t.getScheme() != null && !t.getScheme().isEmpty() && !"http".equals(type)) {
                final String message = Tr.formatMessage(tc, "nonApplicableFieldWithValue", "scheme", t.getScheme(), "Security Scheme Object", type);
                helper.addValidationEvent(new ValidationEvent(ValidationEvent.Severity.WARNING, context.getLocation(), message));
            }
            // 'in' field is only applicable to 'apiKey' type
            if (t.getIn() != null && !"apiKey".equals(type)) {
                final String message = Tr.formatMessage(tc, "nonApplicableFieldWithValue", "in", t.getIn(), "Security Scheme Object", type);
                helper.addValidationEvent(new ValidationEvent(ValidationEvent.Severity.WARNING, context.getLocation(), message));
            }
            // 'name' field is only applicable to 'apiKey' type
            if (t.getName() != null && !t.getName().isEmpty() && !"apiKey".equals(type)) {
                final String message = Tr.formatMessage(tc, "nonApplicableFieldWithValue", "name", t.getName(), "Security Scheme Object", type);
                helper.addValidationEvent(new ValidationEvent(ValidationEvent.Severity.WARNING, context.getLocation(), message));
            }
            // 'openIdConnectUrl' field is only applicable to 'openIdConnect' type
            if (t.getOpenIdConnectUrl() != null && !t.getOpenIdConnectUrl().isEmpty() && !"openIdConnect".equals(type)) {
                final String message = Tr.formatMessage(tc, "nonApplicableFieldWithValue", "openIdConnectUrl", t.getOpenIdConnectUrl(), "Security Scheme Object", type);
                helper.addValidationEvent(new ValidationEvent(ValidationEvent.Severity.WARNING, context.getLocation(), message));
            }
            // 'flows' field is only applicable to 'oauth2' type
            if (!"oauth2".equals(type) && ValidatorUtils.flowsIsSet(t.getFlows())) {
                final String message = Tr.formatMessage(tc, "nonApplicableField", "flows", "Security Scheme Object", type);
                helper.addValidationEvent(new ValidationEvent(ValidationEvent.Severity.WARNING, context.getLocation(), message));
            }
        }
    }
}
public class AlgorithmParameters { /** * Initializes this parameter object using the parameters * specified in { @ code paramSpec } . * @ param paramSpec the parameter specification . * @ exception InvalidParameterSpecException if the given parameter * specification is inappropriate for the initialization of this parameter * object , or if this parameter object has already been initialized . */ public final void init ( AlgorithmParameterSpec paramSpec ) throws InvalidParameterSpecException { } }
if ( this . initialized ) throw new InvalidParameterSpecException ( "already initialized" ) ; paramSpi . engineInit ( paramSpec ) ; this . initialized = true ;
public class Ifc2x3tc1PackageImpl { /** * < ! - - begin - user - doc - - > * < ! - - end - user - doc - - > * @ generated */ public EClass getIfcInventory ( ) { } }
if ( ifcInventoryEClass == null ) { ifcInventoryEClass = ( EClass ) EPackage . Registry . INSTANCE . getEPackage ( Ifc2x3tc1Package . eNS_URI ) . getEClassifiers ( ) . get ( 281 ) ; } return ifcInventoryEClass ;
public class HostProcess { /** * Gets the count of requests sent ( and received ) by all { @ code Plugin } s and the { @ code Analyser } . * @ return the count of request sent * @ since 2.5.0 * @ see # getPluginRequestCount ( int ) * @ see # getAnalyser ( ) */ public int getRequestCount ( ) { } }
synchronized ( mapPluginStats ) { int count = requestCount + getAnalyser ( ) . getRequestCount ( ) ; for ( PluginStats stats : mapPluginStats . values ( ) ) { count += stats . getMessageCount ( ) ; } return count ; }
public class MediaClient {
    /**
     * Creates a new transcoder job which converts media files in BOS buckets with specified preset.
     *
     * @param pipelineName The name of pipeline used by this job.
     * @param clips The keys of the source media file in the bucket specified in the pipeline.
     * @param targetKey The key of the target media file in the bucket specified in the pipeline.
     * @param presetName The name of the preset used by this job.
     * @return The newly created job ID.
     */
    public CreateTranscodingJobResponse createTranscodingJob(String pipelineName, List<SourceClip> clips, String targetKey, String presetName) {
        // Delegate to the full overload; the two trailing nulls presumably mean
        // "no extra options" — confirm against the 6-arg overload's contract.
        return createTranscodingJob(pipelineName, clips, targetKey, presetName, null, null);
    }
}
public class JNDIResourceService { /** * Associate a type with the given resource model . */ public void associateTypeJndiResource ( JNDIResourceModel resource , String type ) { } }
if ( type == null || resource == null ) { return ; } if ( StringUtils . equals ( type , "javax.sql.DataSource" ) && ! ( resource instanceof DataSourceModel ) ) { DataSourceModel ds = GraphService . addTypeToModel ( this . getGraphContext ( ) , resource , DataSourceModel . class ) ; } else if ( StringUtils . equals ( type , "javax.jms.Queue" ) && ! ( resource instanceof JmsDestinationModel ) ) { JmsDestinationModel jms = GraphService . addTypeToModel ( this . getGraphContext ( ) , resource , JmsDestinationModel . class ) ; jms . setDestinationType ( JmsDestinationType . QUEUE ) ; } else if ( StringUtils . equals ( type , "javax.jms.QueueConnectionFactory" ) && ! ( resource instanceof JmsConnectionFactoryModel ) ) { JmsConnectionFactoryModel jms = GraphService . addTypeToModel ( this . getGraphContext ( ) , resource , JmsConnectionFactoryModel . class ) ; jms . setConnectionFactoryType ( JmsDestinationType . QUEUE ) ; } else if ( StringUtils . equals ( type , "javax.jms.Topic" ) && ! ( resource instanceof JmsDestinationModel ) ) { JmsDestinationModel jms = GraphService . addTypeToModel ( this . getGraphContext ( ) , resource , JmsDestinationModel . class ) ; jms . setDestinationType ( JmsDestinationType . TOPIC ) ; } else if ( StringUtils . equals ( type , "javax.jms.TopicConnectionFactory" ) && ! ( resource instanceof JmsConnectionFactoryModel ) ) { JmsConnectionFactoryModel jms = GraphService . addTypeToModel ( this . getGraphContext ( ) , resource , JmsConnectionFactoryModel . class ) ; jms . setConnectionFactoryType ( JmsDestinationType . TOPIC ) ; }
public class AmazonCloudFrontClient {
    /**
     * Delete a streaming distribution. To delete an RTMP distribution using the CloudFront API, perform the following
     * steps.
     * <p>
     * <b>To delete an RTMP distribution using the CloudFront API</b>:
     * <ol>
     * <li>Disable the RTMP distribution.</li>
     * <li>Submit a <code>GET Streaming Distribution Config</code> request to get the current configuration and the
     * <code>Etag</code> header for the distribution.</li>
     * <li>Update the XML document that was returned in the response to your <code>GET Streaming Distribution Config</code>
     * request to change the value of <code>Enabled</code> to <code>false</code>.</li>
     * <li>Submit a <code>PUT Streaming Distribution Config</code> request to update the configuration for your
     * distribution. In the request body, include the XML document that you updated in Step 3. Then set the value of the
     * HTTP <code>If-Match</code> header to the value of the <code>ETag</code> header that CloudFront returned when you
     * submitted the <code>GET Streaming Distribution Config</code> request in Step 2.</li>
     * <li>Review the response to the <code>PUT Streaming Distribution Config</code> request to confirm that the
     * distribution was successfully disabled.</li>
     * <li>Submit a <code>GET Streaming Distribution Config</code> request to confirm that your changes have propagated.
     * When propagation is complete, the value of <code>Status</code> is <code>Deployed</code>.</li>
     * <li>Submit a <code>DELETE Streaming Distribution</code> request. Set the value of the HTTP <code>If-Match</code>
     * header to the value of the <code>ETag</code> header that CloudFront returned when you submitted the
     * <code>GET Streaming Distribution Config</code> request in Step 2.</li>
     * <li>Review the response to your <code>DELETE Streaming Distribution</code> request to confirm that the distribution
     * was successfully deleted.</li>
     * </ol>
     * For information about deleting a distribution using the CloudFront console, see <a
     * href="http://docs.aws.amazon.com/AmazonCloudFront/latest/DeveloperGuide/HowToDeleteDistribution.html">Deleting a
     * Distribution</a> in the <i>Amazon CloudFront Developer Guide</i>.
     *
     * @param request The request to delete a streaming distribution.
     * @return Result of the DeleteStreamingDistribution operation returned by the service.
     * @throws AccessDeniedException Access denied.
     * @throws StreamingDistributionNotDisabledException
     * @throws InvalidIfMatchVersionException The <code>If-Match</code> version is missing or not valid for the distribution.
     * @throws NoSuchStreamingDistributionException The specified streaming distribution does not exist.
     * @throws PreconditionFailedException The precondition given in one or more of the request-header fields evaluated to
     *         <code>false</code>.
     * @sample AmazonCloudFront.DeleteStreamingDistribution
     * @see <a href="http://docs.aws.amazon.com/goto/WebAPI/cloudfront-2018-11-05/DeleteStreamingDistribution"
     *      target="_top">AWS API Documentation</a>
     */
    @Override
    public DeleteStreamingDistributionResult deleteStreamingDistribution(DeleteStreamingDistributionRequest request) {
        // Run client-side request handlers/mutations before dispatching the call.
        request = beforeClientExecution(request);
        return executeDeleteStreamingDistribution(request);
    }
}
public class filterhtmlinjectionvariable { /** * Use this API to fetch filtered set of filterhtmlinjectionvariable resources . * filter string should be in JSON format . eg : " port : 80 , servicetype : HTTP " . */ public static filterhtmlinjectionvariable [ ] get_filtered ( nitro_service service , String filter ) throws Exception { } }
filterhtmlinjectionvariable obj = new filterhtmlinjectionvariable ( ) ; options option = new options ( ) ; option . set_filter ( filter ) ; filterhtmlinjectionvariable [ ] response = ( filterhtmlinjectionvariable [ ] ) obj . getfiltered ( service , option ) ; return response ;
public class EncodedGradientsAccumulator {
    /**
     * This method applies accumulated updates via given StepFunction.
     * Drains this thread's queue of compressed update chunks, decodes them into
     * the dense {@code updates} buffer, folds in any external-source updates,
     * and applies the aggregate via {@code function.step(params, updates)}.
     *
     * @param function step function used to apply the aggregated updates
     * @param params parameter array, updated in place by the step function
     */
    @Override
    public void applyUpdate(StepFunction function, INDArray params, INDArray updates, boolean isFinalStep) {
        // Lazily create the per-thread applied-updates counter (debug accounting).
        if (updatesApplied.get() == null)
            updatesApplied.set(new AtomicLong(0));
        try {
            // nullify given updates first
            Nd4j.getMemoryManager().memset(updates);
            //updates.assign(0.0);
            int cnt = 0;
            // Drain this thread's local message queue; each chunk is decoded
            // additively into the zeroed 'updates' buffer.
            while (!messages.get(index.get()).isEmpty()) {
                INDArray compressed = messages.get(index.get()).poll();
                // Element 3 of the data buffer carries the compression header.
                int encoding = compressed.data().getInt(3);
                if (encoding == ThresholdCompression.FLEXIBLE_ENCODING)
                    Nd4j.getExecutioner().thresholdDecode(compressed, updates);
                else if (encoding == ThresholdCompression.BITMAP_ENCODING)
                    Nd4j.getExecutioner().bitmapDecode(compressed, updates);
                else
                    throw new DL4JInvalidConfigException("Unknown compression header received: " + encoding);
                cnt++;
            }
            if (cnt > 0 && isDebug)
                log.info("Local updates to be applied: {}", cnt);
            // Fold in updates arriving from the optional external source
            // (e.g. a parameter server); counted in cnt so the step still fires.
            if (externalSource != null) {
                int ent = 0;
                if (externalSource.hasAnything()) {
                    externalSource.drainTo(updates);
                    cnt++;
                    ent++;
                }
                if (isDebug)
                    log.info("thread {} finished at Externals", Thread.currentThread().getId());
                if (ent > 0 && isDebug)
                    log.info("External updates to be applied: {}", ent);
            }
            if (isFinalStep)
                synchronize(currentConsumers.get(), isFinalStep);
            // TODO: average updates probably?
            if (cnt > 0) {
                // At least one update was decoded: apply the aggregate step.
                function.step(params, updates);
                updatesApplied.get().addAndGet(cnt);
                if (isDebug)
                    log.info("Total updates applied so far for thread [{}]: [{}]", Thread.currentThread().getName(), updatesApplied.get());
            }
        } catch (Exception e) {
            // Record the first failure for other workers, then propagate (cause kept).
            throwable.setIfFirst(e);
            throw new RuntimeException(e);
        }
    }
}
public class GradientEditor { /** * Check if there is a control point at the specified mouse location * @ param mx The mouse x coordinate * @ param my The mouse y coordinate * @ param pt The point to check agianst * @ return True if the mouse point conincides with the control point */ private boolean checkPoint ( int mx , int my , ControlPoint pt ) { } }
int dx = ( int ) Math . abs ( ( 10 + ( width * pt . pos ) ) - mx ) ; int dy = Math . abs ( ( y + barHeight + 7 ) - my ) ; if ( ( dx < 5 ) && ( dy < 7 ) ) { return true ; } return false ;
public class PropsVectors {
    /**
     * Compact the vectors:
     * - modify the memory
     * - keep only unique vectors
     * - store them contiguously from the beginning of the memory
     * - for each (non-unique) row, call the respective function in CompactHandler
     *
     * The handler's rowIndex is the index of the row in the compacted
     * memory block. Therefore, it starts at 0 and increases in increments of the
     * columns value.
     * In a first phase, only special values are delivered (each exactly once).
     * Then CompactHandler::startRealValues() is called
     * where rowIndex is the length of the compacted array.
     * Then, in the second phase, CompactHandler::setRowIndexForRange() is
     * called for each row of real values.
     */
    public void compact(CompactHandler compactor) {
        if (isCompacted) {
            return;
        }
        // Set the flag now: Sorting and compacting destroys the builder
        // data structure.
        isCompacted = true;
        int valueColumns = columns - 2; // not counting start & limit
        // sort the properties vectors to find unique vector values;
        // indexArray holds the flat offset of each row's start within v
        Integer[] indexArray = new Integer[rows];
        for (int i = 0; i < rows; ++i) {
            indexArray[i] = Integer.valueOf(columns * i);
        }
        Arrays.sort(indexArray, new Comparator<Integer>() {
            @Override
            public int compare(Integer o1, Integer o2) {
                int indexOfRow1 = o1.intValue();
                int indexOfRow2 = o2.intValue();
                int count = columns; // includes start/limit columns
                // start comparing after start/limit
                // but wrap around to them
                int index = 2;
                do {
                    if (v[indexOfRow1 + index] != v[indexOfRow2 + index]) {
                        return v[indexOfRow1 + index] < v[indexOfRow2 + index] ? -1 : 1;
                    }
                    if (++index == columns) {
                        index = 0;
                    }
                } while (--count > 0);
                return 0;
            }
        });
        /*
         * Find and set the special values. This has to do almost the same work
         * as the compaction below, to find the indexes where the special-value
         * rows will move.
         */
        int count = -valueColumns;
        for (int i = 0; i < rows; ++i) {
            int start = v[indexArray[i].intValue()];
            // count a new values vector if it is different
            // from the current one (rows are sorted, so duplicates are adjacent)
            if (count < 0 || !areElementsSame(indexArray[i].intValue() + 2, v, indexArray[i - 1].intValue() + 2, valueColumns)) {
                count += valueColumns;
            }
            if (start == INITIAL_VALUE_CP) {
                compactor.setRowIndexForInitialValue(count);
            } else if (start == ERROR_VALUE_CP) {
                compactor.setRowIndexForErrorValue(count);
            }
        }
        // count is at the beginning of the last vector,
        // add valueColumns to include that last vector
        count += valueColumns;
        // Call the handler once more to signal the start of
        // delivering real values.
        compactor.startRealValues(count);
        /*
         * Move vector contents up to a contiguous array with only unique
         * vector values, and call the handler function for each vector.
         * This destroys the Properties Vector structure and replaces it
         * with an array of just vector values.
         */
        int[] temp = new int[count];
        count = -valueColumns;
        for (int i = 0; i < rows; ++i) {
            int start = v[indexArray[i].intValue()];
            int limit = v[indexArray[i].intValue() + 1];
            // count a new values vector if it is different
            // from the current one
            if (count < 0 || !areElementsSame(indexArray[i].intValue() + 2, temp, count, valueColumns)) {
                count += valueColumns;
                System.arraycopy(v, indexArray[i].intValue() + 2, temp, count, valueColumns);
            }
            if (start < FIRST_SPECIAL_CP) {
                compactor.setRowIndexForRange(start, limit - 1, count);
            }
        }
        v = temp;
        // count is at the beginning of the last vector,
        // add one to include that last vector
        rows = count / valueColumns + 1;
    }
}
public class BasicUserProfile {
    /**
     * Build a profile from user identifier and attributes.
     *
     * @param id user identifier
     * @param attributes user attributes
     */
    public void build(final Object id, final Map<String, Object> attributes) {
        // Sanitise the raw identifier via ProfileHelper before storing it.
        setId(ProfileHelper.sanitizeIdentifier(this, id));
        addAttributes(attributes);
    }
}
public class H2ONode {
    /**
     * *interned*: there is only one per InetAddress.
     * Thread-safe: concurrent callers for the same key all receive the same
     * instance; losers of the publish race discard their candidate node.
     */
    public static final H2ONode intern(H2Okey key) {
        // Fast path: already interned.
        H2ONode h2o = INTERN.get(key);
        if (h2o != null) return h2o;
        // Slow path: build a candidate with a freshly allocated unique index.
        final int idx = UNIQUE.getAndIncrement();
        h2o = new H2ONode(key, idx);
        H2ONode old = INTERN.putIfAbsent(key, h2o);
        // Lost the publish race: return the winner; our candidate (and its
        // allocated idx slot) is simply abandoned.
        if (old != null) return old;
        // Won the race: record in the side index, growing (doubling) as needed.
        synchronized (H2O.class) {
            while (idx >= IDX.length) IDX = Arrays.copyOf(IDX, IDX.length << 1);
            IDX[idx] = h2o;
        }
        return h2o;
    }
}
public class TextFieldExample { /** * Override preparePaintComponent to test that dynamic attributes are handled correctly . * @ param request the request that triggered the paint . */ @ Override protected void preparePaintComponent ( final Request request ) { } }
super . preparePaintComponent ( request ) ; if ( ! isInitialised ( ) ) { tf3 . setText ( "This is read only." ) ; tf4 . setText ( "This is disabled." ) ; readFields ( ) ; setInitialised ( true ) ; }
public class BatchTask { /** * Cancels all tasks via their { @ link ChainedDriver # cancelTask ( ) } method . Any occurring exception * and error is suppressed , such that the canceling method of every task is invoked in all cases . * @ param tasks The tasks to be canceled . */ public static void cancelChainedTasks ( List < ChainedDriver < ? , ? > > tasks ) { } }
for ( int i = 0 ; i < tasks . size ( ) ; i ++ ) { try { tasks . get ( i ) . cancelTask ( ) ; } catch ( Throwable t ) { // do nothing } }
public class Grep {
    /**
     * Command-line entry point: {@code grep <pattern> <glob>} searched from the
     * current working directory.
     * TODO: color
     */
    public static void main(String[] args) throws IOException {
        if (args.length != 2) {
            System.err.println("Syntax: grep <pattern> <glob>");
            System.exit(-1);
        }
        // args[1] may hold several whitespace-separated globs.
        Grep grep = new Grep(Paths.get("."), args[1].split("\\s+"));
        Map<Path, List<LineMatches>> pathMatches = grep.find(args[0]);
        for (Path path : pathMatches.keySet()) {
            System.out.println(path + ":");
            // Largest matched line number in this file, used to right-align the
            // line-number column. Assumes each file has at least one match —
            // otherwise Optional.get() below would throw; confirm find() never
            // maps a path to an empty list.
            int maxIndex = pathMatches.get(path).stream().reduce((max, lineMatches) -> {
                return max == null || lineMatches.index > max.index ? lineMatches : max;
            }).get().index;
            int padLen = 2 + String.valueOf(maxIndex).length();
            for (LineMatches lineMatches : pathMatches.get(path)) {
                String idx = String.valueOf(lineMatches.index);
                while (idx.length() < padLen) {
                    idx = " " + idx;
                }
                System.out.println(idx + ": " + lineMatches.line);
            }
        }
        if (grep.count >= grep.limit)
            System.err.println("(Showing first " + grep.count + " results)");
        // NOTE(review): the process exit code is the match count — confirm
        // callers/scripts rely on this convention before changing it.
        System.exit(grep.count);
    }
}
public class JCuda {
    /**
     * Prefetches memory to the specified destination device.
     * <br>
     * Prefetches memory to the specified destination device. devPtr is the
     * base device pointer of the memory to be prefetched and dstDevice is the
     * destination device. count specifies the number of bytes to copy. stream
     * is the stream in which the operation is enqueued.<br>
     * <br>
     * Passing in cudaCpuDeviceId for dstDevice will prefetch the data to CPU memory.<br>
     * <br>
     * If no physical memory has been allocated for this region, then this memory region
     * will be populated and mapped on the destination device. If there's insufficient
     * memory to prefetch the desired region, the Unified Memory driver may evict pages
     * belonging to other memory regions to make room. If there's no memory that can be
     * evicted, then the Unified Memory driver will prefetch less than what was requested.<br>
     * <br>
     * In the normal case, any mappings to the previous location of the migrated pages are
     * removed and mappings for the new location are only setup on the dstDevice.
     * The application can exercise finer control on these mappings using ::cudaMemAdvise.<br>
     * <br>
     * Note that this function is asynchronous with respect to the host and all work
     * on other devices.
     *
     * @param devPtr Pointer to be prefetched
     * @param count Size in bytes
     * @param dstDevice Destination device to prefetch to
     * @param stream Stream to enqueue prefetch operation
     * @return cudaSuccess, cudaErrorInvalidValue, cudaErrorInvalidDevice
     * @see JCuda#cudaMemcpy
     * @see JCuda#cudaMemcpyPeer
     * @see JCuda#cudaMemcpyAsync
     * @see JCuda#cudaMemcpy3DPeerAsync
     * @see JCuda#cudaMemAdvise
     */
    public static int cudaMemPrefetchAsync(Pointer devPtr, long count, int dstDevice, cudaStream_t stream) {
        // Delegate to the native binding; checkResult applies the library's
        // configured error-handling (e.g. throwing on non-success codes).
        return checkResult(cudaMemPrefetchAsyncNative(devPtr, count, dstDevice, stream));
    }
}
public class WSX509TrustManager { /** * object prop */ private String getProperty ( String propertyName , Properties prop , boolean processIsServer ) { } }
String value = null ; if ( prop != null ) { // if client process , get system prop first , global prop second , // then sslconfig or keystore prop third ( for override compatibility ) if ( ! processIsServer ) { value = System . getProperty ( propertyName ) ; if ( value == null ) { value = SSLConfigManager . getInstance ( ) . getGlobalProperty ( propertyName ) ; } } if ( value == null ) { value = prop . getProperty ( propertyName ) ; } } else { value = System . getProperty ( propertyName ) ; if ( value == null ) { value = SSLConfigManager . getInstance ( ) . getGlobalProperty ( propertyName ) ; } } return value ;
public class MultiIndex { /** * Returns the number of documents in this index . * @ return the number of documents in this index . * @ throws IOException * if an error occurs while reading from the index . */ int numDocs ( ) throws IOException { } }
if ( indexNames . size ( ) == 0 ) { return volatileIndex . getNumDocuments ( ) ; } else { CachingMultiIndexReader reader = getIndexReader ( ) ; try { return reader . numDocs ( ) ; } finally { reader . release ( ) ; } }
public class SystemContent {
    /**
     * Projects a single property definition onto the provided graph under the location of
     * <code>nodeTypePath</code>. The operations needed to create the property definition and any
     * of its properties will be added to the batch specified in <code>batch</code>.
     *
     * All node creation is performed through the graph layer. If the primary type of the node at
     * <code>nodeTypePath</code> does not contain a residual definition that allows child nodes of
     * type <code>nt:propertyDefinition</code>, this method creates nodes for which the JCR layer
     * cannot determine the corresponding node definition. This WILL corrupt the graph from a JCR
     * standpoint and make it unusable through the ModeShape JCR layer.
     *
     * @param nodeTypeNode the parent node under which each property definition should be saved; may not be null
     * @param propertyDef the property definition to be projected
     */
    private void store(MutableCachedNode nodeTypeNode, JcrPropertyDefinition propertyDef) {
        // Find an existing node for this property definition ...
        final NodeKey key = propertyDef.key();
        final Name name = propertyDef.getInternalName();
        MutableCachedNode propDefnNode = null;
        if (!nodeTypeNode.isNew()) {
            if (nodeTypeNode.getChildReferences(system).hasChild(key)) {
                // The node already exists ...
                propDefnNode = system.mutable(key);
            }
        }
        // Build the full property list for the definition node.
        List<Property> properties = new ArrayList<Property>();
        properties.add(propertyFactory.create(JcrLexicon.PRIMARY_TYPE, JcrNtLexicon.PROPERTY_DEFINITION));
        // Residual ("*") definitions carry no explicit name property.
        if (!JcrNodeType.RESIDUAL_ITEM_NAME.equals(propertyDef.getName())) {
            properties.add(propertyFactory.create(JcrLexicon.NAME, name));
        }
        properties.add(propertyFactory.create(JcrLexicon.AUTO_CREATED, propertyDef.isAutoCreated()));
        properties.add(propertyFactory.create(JcrLexicon.MANDATORY, propertyDef.isMandatory()));
        properties.add(propertyFactory.create(JcrLexicon.MULTIPLE, propertyDef.isMultiple()));
        properties.add(propertyFactory.create(JcrLexicon.PROTECTED, propertyDef.isProtected()));
        properties.add(propertyFactory.create(JcrLexicon.ON_PARENT_VERSION,
                                              OnParentVersionAction.nameFromValue(propertyDef.getOnParentVersion())));
        properties.add(propertyFactory.create(JcrLexicon.REQUIRED_TYPE,
                                              org.modeshape.jcr.api.PropertyType.nameFromValue(propertyDef.getRequiredType()).toUpperCase()));
        // Only non-null query operator symbols are stored.
        List<String> symbols = new ArrayList<String>();
        for (String value : propertyDef.getAvailableQueryOperators()) {
            if (value != null) symbols.add(value);
        }
        properties.add(propertyFactory.create(JcrLexicon.AVAILABLE_QUERY_OPERATORS, symbols));
        // Default values are persisted in their string form.
        Value[] defaultValues = propertyDef.getDefaultValues();
        if (defaultValues != null && defaultValues.length > 0) {
            String[] defaultsAsString = new String[defaultValues.length];
            for (int i = 0; i < defaultValues.length; i++) {
                try {
                    defaultsAsString[i] = defaultValues[i].getString();
                } catch (RepositoryException re) {
                    // Really shouldn't get here as all values are convertible to string
                    throw new IllegalStateException(re);
                }
            }
            properties.add(propertyFactory.create(JcrLexicon.DEFAULT_VALUES, defaultsAsString));
        }
        String[] valueConstraints = propertyDef.getValueConstraints();
        if (valueConstraints.length > 0) {
            properties.add(propertyFactory.create(JcrLexicon.VALUE_CONSTRAINTS, valueConstraints));
        }
        // Now either update the existing node or create a new node ..
        if (propDefnNode != null) {
            // Update the properties ...
            propDefnNode.setProperties(system, properties);
        } else {
            // We have to create the node type node ...
            propDefnNode = nodeTypeNode.createChild(system, key, name, properties);
        }
    }
}
public class From { /** * Creates and chains a GroupBy object to group the query result . * @ param expressions The group by expression . * @ return The GroupBy object that represents the GROUP BY clause of the query . */ @ NonNull @ Override public GroupBy groupBy ( @ NonNull Expression ... expressions ) { } }
if ( expressions == null ) { throw new IllegalArgumentException ( "expressions cannot be null." ) ; } return new GroupBy ( this , Arrays . asList ( expressions ) ) ;
public class BNFHeadersImpl { /** * Method to marshall all instances of a particular header into the * input buffers ( expanding them if need be ) . * @ param inBuffers * @ param elem * @ return WsByteBuffer [ ] */ protected WsByteBuffer [ ] marshallHeader ( WsByteBuffer [ ] inBuffers , HeaderElement elem ) { } }
if ( elem . wasRemoved ( ) ) { return inBuffers ; } WsByteBuffer [ ] buffers = inBuffers ; final byte [ ] value = elem . asRawBytes ( ) ; if ( null != value ) { buffers = putBytes ( elem . getKey ( ) . getMarshalledByteArray ( foundCompactHeader ( ) ) , buffers ) ; buffers = putBytes ( value , elem . getOffset ( ) , elem . getValueLength ( ) , buffers ) ; buffers = putBytes ( BNFHeaders . EOL , buffers ) ; if ( TraceComponent . isAnyTracingEnabled ( ) && tc . isEventEnabled ( ) ) { Tr . event ( tc , "Marshalling: " + elem . getKey ( ) + " [" + elem . getDebugValue ( ) + "]" ) ; } } return buffers ;
public class SessionAttributeInitializingFilter { /** * Sets the attribute map . The specified attributes are copied into the * underlying map , so modifying the specified attributes parameter after * the call won ' t change the internal state . */ public void setAttributes ( Map < String , ? > attributes ) { } }
if ( attributes == null ) { attributes = new HashMap < > ( ) ; } this . attributes . clear ( ) ; this . attributes . putAll ( attributes ) ;
public class XGenBuff {
    /**
     * Special case where code is dynamic, so give access to State and Trans info.
     * Hands this buffer's XGen instance to the supplied dynamic code block.
     *
     * @param state current state
     * @param trans current transaction
     * @param cache cache passed through to the dynamic code
     * @param code dynamic code callback to execute
     * @throws APIException
     * @throws IOException
     */
    @SuppressWarnings({"unchecked", "rawtypes"})
    public void run(State<Env> state, Trans trans, Cache cache, DynamicCode code) throws APIException, IOException {
        // Delegate straight to the callback, exposing this buffer's xgen.
        code.code(state, trans, cache, xgen);
    }
}
public class FileUtil {
    /**
     * Deletes a directory together with all of its sub-directories and files.
     *
     * @param dir the directory to delete; must exist and actually be a directory
     * @throws IOException if the traversal or a deletion fails
     * @see Files#walkFileTree
     */
    public static void deleteDir(Path dir) throws IOException {
        Validate.isTrue(isDirExists(dir), "%s is not exist or not a dir", dir);
        // Post-order traversal: files/sub-directories are deleted before their parent.
        Files.walkFileTree(dir, deleteFileVisitor);
    }
}
public class JobApi { /** * Download a single artifact file from within the job ' s artifacts archive . * Only a single file is going to be extracted from the archive and streamed to a client . * < pre > < code > GitLab Endpoint : GET / projects / : id / jobs / : job _ id / artifacts / * artifact _ path < / code > < / pre > * @ param projectIdOrPath id , path of the project , or a Project instance holding the project ID or path * @ param jobId the unique job identifier * @ param artifactPath the Path to a file inside the artifacts archive * @ return an InputStream to read the specified artifacts file from * @ throws GitLabApiException if any exception occurs */ public InputStream downloadSingleArtifactsFile ( Object projectIdOrPath , Integer jobId , Path artifactPath ) throws GitLabApiException { } }
String path = artifactPath . toString ( ) . replace ( "\\" , "/" ) ; Response response = get ( Response . Status . OK , getDefaultPerPageParam ( ) , "projects" , getProjectIdOrPath ( projectIdOrPath ) , "jobs" , jobId , "artifacts" , path ) ; return ( response . readEntity ( InputStream . class ) ) ;
public class ImageManipulation {
    /**
     * Zooms either in or out of an image by a supplied amount. The zooming
     * occurs from the center of the image.
     *
     * @param ip the image to zoom
     * @param zoomAmt the amount to zoom the image: 0 &lt; zoomAmt &lt; 1 zooms out,
     *            zoomAmt == 1 leaves the original image, zoomAmt &gt; 1 zooms in
     * @return the zoomed image
     */
    private ImageProcessor zoom(ImageProcessor ip, String zoomAmt) {
        if (zoomAmt != null) {
            try {
                float zoom = Float.parseFloat(zoomAmt);
                // Negative factors are rejected outright.
                // NOTE(review): a value of exactly 0 passes this guard and scales the
                // image away to nothing -- confirm whether 0 should also return early.
                if (zoom < 0) {
                    return ip;
                }
                ip.scale(zoom, zoom);
                // if the image is being zoomed out, trim the extra whitespace around the image
                if (zoom < 1) {
                    int imgWidth = ip.getWidth();
                    int imgHeight = ip.getHeight();
                    // set a ROI around the image, minus the extra whitespace
                    ip.setRoi(Math.round(imgWidth / 2 - imgWidth * zoom / 2),
                              Math.round(imgHeight / 2 - imgHeight * zoom / 2),
                              Math.round(imgWidth * zoom),
                              Math.round(imgHeight * zoom));
                    ip = ip.crop();
                }
            }
            // no need to do anything with number format exception since the servlet
            // returns only images; just return the original image
            catch (NumberFormatException e) {
            }
        }
        return ip;
    }
}
public class RdmaServerEndpoint {
    /**
     * Bind this server endpoint to a specific IP address/port and start listening.
     *
     * @param src the local address (rdma://host:port)
     * @param backlog listen backlog passed to the CM id
     * @return the rdma server endpoint
     * @throws Exception the exception
     */
    public synchronized RdmaServerEndpoint<C> bind(SocketAddress src, int backlog) throws Exception {
        // Binding is only legal from the freshly-initialized state.
        if (connState != CONN_STATE_INITIALIZED) {
            throw new IOException("endpoint has to be disconnected for bind");
        }
        connState = CONN_STATE_READY_FOR_ACCEPT;
        // Bind and listen on the CM id, then allocate the protection domain.
        idPriv.bindAddr(src);
        idPriv.listen(backlog);
        this.pd = group.createProtectionDomainRaw(this);
        logger.info("PD value " + pd.getHandle());
        return this;
    }
}
public class WxApi2Impl { /** * 模板消息 */ @ Override public WxResp template_api_set_industry ( String industry_id1 , String industry_id2 ) { } }
return postJson ( "/template/api_set_industry" , "industry_id1" , industry_id1 , "industry_id2" , industry_id2 ) ;
public class NettyHandler { /** * Handles a response . */ private void handleResponse ( ByteBuf response , ChannelHandlerContext context ) { } }
NettyConnection connection = getConnection ( context . channel ( ) ) ; if ( connection != null ) { connection . handleResponse ( response ) ; }
public class ControllerHandler { /** * Validates that the declared consumes can actually be processed by Fathom . * @ param fathomContentTypes */ protected void validateConsumes ( Collection < String > fathomContentTypes ) { } }
Set < String > ignoreConsumes = new TreeSet < > ( ) ; ignoreConsumes . add ( Consumes . ALL ) ; // these are handled by the TemplateEngine ignoreConsumes . add ( Consumes . HTML ) ; ignoreConsumes . add ( Consumes . XHTML ) ; // these are handled by the Servlet layer ignoreConsumes . add ( Consumes . FORM ) ; ignoreConsumes . add ( Consumes . MULTIPART ) ; for ( String declaredConsume : declaredConsumes ) { if ( ignoreConsumes . contains ( declaredConsume ) ) { continue ; } String consume = declaredConsume ; int fuzz = consume . indexOf ( '*' ) ; if ( fuzz > - 1 ) { // strip fuzz , we must have a registered engine for the unfuzzed content - type consume = consume . substring ( 0 , fuzz ) ; } if ( ! fathomContentTypes . contains ( consume ) ) { if ( consume . equals ( declaredConsume ) ) { throw new FatalException ( "{} declares @{}(\"{}\") but there is no registered ContentTypeEngine for that type!" , Util . toString ( method ) , Consumes . class . getSimpleName ( ) , declaredConsume ) ; } else { throw new FatalException ( "{} declares @{}(\"{}\") but there is no registered ContentTypeEngine for \"{}\"!" , Util . toString ( method ) , Consumes . class . getSimpleName ( ) , declaredConsume , consume ) ; } } }
public class ReflectUtil { /** * Get all fields * @ param clz * @ param includeStatic include static fields or not * @ return */ public static Field [ ] getAllFields ( Class < ? > clz , boolean includeStatic ) { } }
return Arrays . stream ( getAllFields ( clz ) ) . filter ( f -> includeStatic || ! Modifier . isStatic ( f . getModifiers ( ) ) ) . toArray ( Field [ ] :: new ) ;
public class CreateDevicePoolRequest { /** * The device pool ' s rules . * @ param rules * The device pool ' s rules . */ public void setRules ( java . util . Collection < Rule > rules ) { } }
if ( rules == null ) { this . rules = null ; return ; } this . rules = new java . util . ArrayList < Rule > ( rules ) ;
public class DiscordWebSocketAdapter {
    /**
     * Disconnects from the websocket.
     */
    public void disconnect() {
        // Mark the close as intentional so no reconnect is attempted.
        reconnect = false;
        websocket.get().sendClose(WebSocketCloseReason.DISCONNECT.getNumericCloseCode());
        // cancel heartbeat timer if within one minute no disconnect event was dispatched
        api.getThreadPool().getDaemonScheduler().schedule(() -> heartbeatTimer.updateAndGet(future -> {
            if (future != null) {
                future.cancel(false);
            }
            return null;
        }), 1, TimeUnit.MINUTES);
    }
}
public class VirtualMachinesInner {
    /**
     * The operation to start a virtual machine.
     *
     * @param resourceGroupName The name of the resource group.
     * @param vmName The name of the virtual machine.
     * @param serviceCallback the async ServiceCallback to handle successful and failed responses.
     * @throws IllegalArgumentException thrown if parameters fail the validation
     * @return the {@link ServiceFuture} object
     */
    public ServiceFuture<OperationStatusResponseInner> startAsync(String resourceGroupName, String vmName, final ServiceCallback<OperationStatusResponseInner> serviceCallback) {
        // Bridge the observable-based overload to the callback-style API.
        return ServiceFuture.fromResponse(startWithServiceResponseAsync(resourceGroupName, vmName), serviceCallback);
    }
}
public class UtilityExtensions { /** * Convenience method . */ public static String formatInput ( Replaceable input , Transliterator . Position pos ) { } }
return formatInput ( ( ReplaceableString ) input , pos ) ;
public class BaseDatabase {
    /**
     * Free this database object: close it, free all of its tables, then detach
     * from the owner and drop remaining references.
     */
    public void free() {
        this.close();
        // Free tables one at a time from the front of the list.
        // NOTE(review): this loop assumes table.free() removes the table from
        // m_vTableList; otherwise it would spin forever -- confirm in BaseTable.
        while (m_vTableList.size() > 0) {
            BaseTable table = m_vTableList.elementAt(0);
            table.free();
        }
        m_vTableList.removeAllElements();
        m_vTableList = null;
        // Detach from the owner and null out state for GC.
        if (m_databaseOwner != null)
            m_databaseOwner.removeDatabase(this);
        m_databaseOwner = null;
        m_strDbName = null;
    }
}
public class ModelAnnotation { /** * Gets the attribute as boolean . * @ param attribute the attribute * @ return the attribute as boolean */ public boolean getAttributeAsBoolean ( AnnotationAttributeType attribute ) { } }
String temp = attributes . get ( attribute . getValue ( ) ) ; return Boolean . parseBoolean ( temp ) ;
public class BinaryJedis {
    /**
     * Return all the values in a hash.
     * <b>Time complexity:</b> O(N), where N is the total number of entries
     *
     * @param key the hash key
     * @return all the field values contained in the hash
     */
    @Override
    public List<byte[]> hvals(final byte[] key) {
        // This blocking API cannot be used inside MULTI or a pipeline.
        checkIsInMultiOrPipeline();
        client.hvals(key);
        // Read the reply synchronously from the same connection.
        return client.getBinaryMultiBulkReply();
    }
}
public class Parsers {
    /**
     * Parser that tries, in this order:
     * <ul>
     * <li>ResultType.fromString(String)
     * <li>ResultType.decode(String)
     * <li>ResultType.valueOf(String)
     * <li>new ResultType(String)
     * </ul>
     *
     * @param resultType the target type to parse into (primitives are wrapped)
     * @return a parser producing instances of the requested type
     * @throws NoSuchMethodException if neither a conversion method nor a String constructor exists
     */
    public static <T> Parser<T> conventionalParser(Class<T> resultType) throws NoSuchMethodException {
        // Strings need no conversion at all.
        if (resultType == String.class) {
            @SuppressWarnings("unchecked") // T == String
            Parser<T> identity = (Parser<T>) IDENTITY;
            return identity;
        }
        final Class<T> wrappedResultType = Primitives.wrap(resultType);
        // Probe the conventional static factory methods in priority order.
        for (String methodName : CONVERSION_METHOD_NAMES) {
            try {
                final Method method = wrappedResultType.getDeclaredMethod(methodName, String.class);
                if (Util.isStatic(method) && wrappedResultType.isAssignableFrom(method.getReturnType())) {
                    method.setAccessible(true); // to permit inner enums, etc.
                    return new InvokingParser<T>() {
                        @Override
                        protected T invoke(String input) throws Exception {
                            return wrappedResultType.cast(method.invoke(null, input));
                        }
                    };
                }
            } catch (Exception tryAgain) {
                // Deliberately ignored: a missing/unsuitable method just means
                // we try the next conventional name.
            }
        }
        // Last resort: a public-or-not String constructor; a missing one
        // propagates NoSuchMethodException to the caller.
        final Constructor<T> constr = wrappedResultType.getDeclaredConstructor(String.class);
        constr.setAccessible(true);
        return new InvokingParser<T>() {
            @Override
            protected T invoke(String input) throws Exception {
                return wrappedResultType.cast(constr.newInstance(input));
            }
        };
    }
}
public class RmpAppirater { /** * Modify internal value . * If you use this method , you might need to have a good understanding of this class code . * @ param context Context * @ param reminderClickDate Date of " Remind me later " button clicked . */ public static void setReminderClickDate ( Context context , Date reminderClickDate ) { } }
final long reminderClickDateMills = ( ( reminderClickDate != null ) ? reminderClickDate . getTime ( ) : 0 ) ; SharedPreferences prefs = getSharedPreferences ( context ) ; SharedPreferences . Editor prefsEditor = prefs . edit ( ) ; prefsEditor . putLong ( PREF_KEY_REMINDER_CLICK_DATE , reminderClickDateMills ) ; prefsEditor . commit ( ) ;
public class AbstractDirector {
    /**
     * Checks if filesInstalled includes scripts in a bin folder, i.e. whether
     * any installed file's absolute path passes through a "bin" directory
     * (either Unix "/bin/" or Windows "\bin\" separators).
     *
     * @param filesInstalled the list of files that are installed
     * @return true if at least one file is in the bin path
     */
    boolean containScript(List<File> filesInstalled) {
        return filesInstalled.stream()
                .map(file -> file.getAbsolutePath().toLowerCase())
                .anyMatch(path -> path.contains("/bin/") || path.contains("\\bin\\"));
    }
}
public class SecurityUtils { /** * Returns the profile from authentication attribute from the current request . * @ return the profile object , or null if there ' s no authentication */ public static Profile getCurrentProfile ( ) { } }
RequestContext context = RequestContext . getCurrent ( ) ; if ( context != null ) { return getProfile ( context . getRequest ( ) ) ; } else { return null ; }
public class AWSOrganizationsClient {
    /**
     * Lists the organizational units (OUs) in a parent organizational unit or root.
     * <note>
     * Always check the <code>NextToken</code> response parameter for a <code>null</code> value when
     * calling a <code>List*</code> operation: these operations can occasionally return an empty page
     * even when more results are available; only a <code>null</code> token means the end.
     * </note>
     * This operation can be called only from the organization's master account.
     *
     * @param request the ListOrganizationalUnitsForParent request
     * @return Result of the ListOrganizationalUnitsForParent operation returned by the service.
     * @throws AccessDeniedException if the caller lacks the required IAM permissions.
     * @throws AWSOrganizationsNotInUseException if the account is not a member of an organization.
     * @throws InvalidInputException if one or more request parameters are invalid (the exception's
     *         reason code, e.g. INVALID_PAGINATION_TOKEN or MAX_LENGTH_EXCEEDED, identifies the
     *         violated constraint).
     * @throws ParentNotFoundException if no root or OU matches the specified <code>ParentId</code>.
     * @throws ServiceException on an internal service error; retry later.
     * @throws TooManyRequestsException if requests are being throttled; retry later.
     * @sample AWSOrganizations.ListOrganizationalUnitsForParent
     * @see <a href="http://docs.aws.amazon.com/goto/WebAPI/organizations-2016-11-28/ListOrganizationalUnitsForParent"
     *      target="_top">AWS API Documentation</a>
     */
    @Override
    public ListOrganizationalUnitsForParentResult listOrganizationalUnitsForParent(ListOrganizationalUnitsForParentRequest request) {
        // Run client-side request handlers/customizations before dispatch.
        request = beforeClientExecution(request);
        return executeListOrganizationalUnitsForParent(request);
    }
}
public class AndroidJus {
    /**
     * Creates a default instance of the worker pool and calls {@link RequestQueue#start()} on it.
     *
     * @param context A {@link Context} to use for creating the cache dir.
     * @param stack An {@link HttpStack} to use for the network, or null for default.
     * @return A started {@link RequestQueue} instance.
     */
    public static RequestQueue newRequestQueue(Context context, HttpStack stack) {
        JusLog.log = new ALog();
        File cacheDir = new File(context.getCacheDir(), DEFAULT_CACHE_DIR);
        // Build a "package/versionCode" user agent; fall back to "jus/0" when
        // the package cannot be resolved.
        String userAgent = "jus/0";
        try {
            String packageName = context.getPackageName();
            PackageInfo info = context.getPackageManager().getPackageInfo(packageName, 0);
            userAgent = packageName + "/" + info.versionCode;
        } catch (NameNotFoundException e) {
            // keep the default user agent
        }
        if (stack == null) {
            stack = new HurlStack();
        }
        Network network = new HttpNetwork(stack);
        // Responses are delivered on the main (UI) thread via the Looper handler.
        RequestQueue queue = new RequestQueue(new DiskBasedCache(cacheDir), network,
                RequestQueue.DEFAULT_NETWORK_THREAD_POOL_SIZE,
                new AndroidExecutorDelivery(new Handler(Looper.getMainLooper())));
        queue.withCacheDispatcher(new AndroidCacheDispatcher(queue.cacheQueue, queue.networkQueue, queue.cache, queue.delivery))
             .withNetworkDispatcherFactory(new AndroidNetworkDispatcher.NetworkDispatcherFactory(queue.networkQueue, queue.network, queue.cache, queue.delivery));
        queue.start();
        return queue;
    }
}
public class GenIdUtil {
    /**
     * Generates an id for the given property of the target object, using the
     * supplied {@link GenId} implementation (instances are cached per class).
     * The property is only populated when its current value is null.
     *
     * @param target the object whose property receives the generated id
     * @param property the property name to populate
     * @param genClass the id-generator class to instantiate (cached)
     * @param table the table name passed to the generator
     * @param column the column name passed to the generator
     * @throws MapperException if the generator cannot be created or applied
     */
    public static void genId(Object target, String property, Class<? extends GenId> genClass, String table, String column) throws MapperException {
        try {
            GenId genId;
            if (CACHE.containsKey(genClass)) {
                genId = CACHE.get(genClass);
            } else {
                // Double-checked creation under the lock so each generator
                // class is instantiated at most once.
                LOCK.lock();
                try {
                    if (!CACHE.containsKey(genClass)) {
                        CACHE.put(genClass, genClass.newInstance());
                    }
                    genId = CACHE.get(genClass);
                } finally {
                    LOCK.unlock();
                }
            }
            MetaObject metaObject = MetaObjectUtil.forObject(target);
            // Only fill the property when it has no value yet.
            if (metaObject.getValue(property) == null) {
                Object id = genId.genId(table, column);
                metaObject.setValue(property, id);
            }
        } catch (Exception e) {
            throw new MapperException("生成 ID 失败!", e);
        }
    }
}
public class ArrayFile {
    /**
     * Updates the length of this ArrayFile to the specified value.
     *
     * @param arrayLength the new array length
     * @param renameToFile the copy of this ArrayFile. If <code>null</code>, no backup copy will be created.
     * @throws IOException if the length is negative or the underlying file cannot be resized
     */
    public synchronized void setArrayLength(int arrayLength, File renameToFile) throws IOException {
        if (arrayLength < 0) {
            throw new IOException("Invalid array length: " + arrayLength);
        }
        // No-op when the length is unchanged.
        if (this._arrayLength == arrayLength) return;
        // Flush all the changes.
        this.flush();
        // Change the file length.
        long fileLength = DATA_START_POSITION + ((long) arrayLength * _elementSize);
        RandomAccessFile raf = new RandomAccessFile(_file, "rw");
        try {
            raf.setLength(fileLength);
        } catch (IOException e) {
            _log.error("failed to setArrayLength " + arrayLength);
            throw e;
        } finally {
            raf.close();
        }
        // Write the new array length.
        writeArrayLength(arrayLength);
        this.flush();
        if (renameToFile != null) {
            if (_file.renameTo(renameToFile)) {
                // Re-point the writer at the renamed file and we are done.
                _writer.close();
                _file = renameToFile;
                _writer = IOFactory.createDataWriter(_file, _type);
                _writer.open();
                return;
            } else {
                _log.warn("Failed to rename " + _file.getAbsolutePath() + " to " + renameToFile.getAbsolutePath());
            }
        }
        // Re-establish the writer against the resized file: memory-mapped
        // writers are remapped in place, others are reopened.
        if (MultiMappedWriter.class.isInstance(_writer)) {
            ((MultiMappedWriter) _writer).remap();
            _log.info("remap " + _file.getPath() + " " + _file.length());
        } else {
            _writer.close();
            _writer = IOFactory.createDataWriter(_file, _type);
            _writer.open();
        }
    }
}
public class ServiceRegistry { /** * Retrieves service registered under given name * @ param name name of the service * @ return instance of the service registered with given name * @ throws IllegalArgumentException thrown in case service with given name is not registered */ public Object service ( String name ) { } }
Object service = this . services . get ( name ) ; if ( service == null ) { throw new IllegalArgumentException ( "Service '" + name + "' not found" ) ; } return service ;
public class IotHubResourcesInner {
    /**
     * Get the list of valid SKUs for an IoT hub (a single page of results).
     * ServiceResponse&lt;PageImpl&lt;IotHubSkuDescriptionInner&gt;&gt;
     *
     * @param nextPageLink The NextLink from the previous successful call to List operation.
     * @throws IllegalArgumentException thrown if parameters fail the validation
     * @return the PagedList&lt;IotHubSkuDescriptionInner&gt; object wrapped in {@link ServiceResponse} if successful.
     */
    public Observable<ServiceResponse<Page<IotHubSkuDescriptionInner>>> getValidSkusNextSinglePageAsync(final String nextPageLink) {
        if (nextPageLink == null) {
            throw new IllegalArgumentException("Parameter nextPageLink is required and cannot be null.");
        }
        String nextUrl = String.format("%s", nextPageLink);
        return service.getValidSkusNext(nextUrl, this.client.acceptLanguage(), this.client.userAgent())
            .flatMap(new Func1<Response<ResponseBody>, Observable<ServiceResponse<Page<IotHubSkuDescriptionInner>>>>() {
                @Override
                public Observable<ServiceResponse<Page<IotHubSkuDescriptionInner>>> call(Response<ResponseBody> response) {
                    try {
                        // Deserialize the page payload and re-wrap it for the caller.
                        ServiceResponse<PageImpl<IotHubSkuDescriptionInner>> result = getValidSkusNextDelegate(response);
                        return Observable.just(new ServiceResponse<Page<IotHubSkuDescriptionInner>>(result.body(), result.response()));
                    } catch (Throwable t) {
                        // Surface parse/service errors through the observable, not the call stack.
                        return Observable.error(t);
                    }
                }
            });
    }
}
public class ResourceInstanceHelper { /** * Returns the number of active instances of the given template name * @ param service * @ param templateName * @ return */ public int countActiveInstances ( ServiceManagerResourceRestService service , String templateName ) { } }
List < ResourceInstanceDTO > instances = activeInstances ( service , templateName ) ; return instances . size ( ) ;
public class MutableTimecodeDuration { /** * Returns a TimecodeDuration instance for given timecode string and timecode base . * What is considered acceptable input varies per selected StringType * @ param timecode * @ param timecodeBase * @ param stringType * @ return the TimecodeDuration * @ throws IllegalArgumentException */ public static MutableTimecodeDuration valueOf ( String timecode , int timecodeBase , StringType stringType ) throws IllegalArgumentException { } }
MutableTimecodeDuration tc = new MutableTimecodeDuration ( ) ; return ( MutableTimecodeDuration ) tc . parse ( timecode , timecodeBase , stringType ) ;
public class DescribeTrustsRequest { /** * A list of identifiers of the trust relationships for which to obtain the information . If this member is null , all * trust relationships that belong to the current account are returned . * An empty list results in an < code > InvalidParameterException < / code > being thrown . * @ param trustIds * A list of identifiers of the trust relationships for which to obtain the information . If this member is * null , all trust relationships that belong to the current account are returned . < / p > * An empty list results in an < code > InvalidParameterException < / code > being thrown . */ public void setTrustIds ( java . util . Collection < String > trustIds ) { } }
if ( trustIds == null ) { this . trustIds = null ; return ; } this . trustIds = new com . amazonaws . internal . SdkInternalList < String > ( trustIds ) ;
public class CmsEditFieldDialog { /** * Returns a list for the indexed select box . < p > * @ return a list for the indexed select box */ private List < CmsSelectWidgetOption > getTokenizedWidgetConfiguration ( ) { } }
List < CmsSelectWidgetOption > result = new ArrayList < CmsSelectWidgetOption > ( ) ; result . add ( new CmsSelectWidgetOption ( "true" , true ) ) ; result . add ( new CmsSelectWidgetOption ( "false" , false ) ) ; result . add ( new CmsSelectWidgetOption ( "untokenized" , false ) ) ; return result ;
public class UpgradableLock {
    /**
     * Attempts to immediately acquire an upgrade lock without blocking.
     *
     * @param locker object trying to become lock owner
     * @return FAILED, ACQUIRED or OWNED
     */
    private final Result tryLockForUpgrade_(L locker) {
        int state = mState;
        if ((state & LOCK_STATE_MASK) == 0) {
            // no write or upgrade lock is held
            if (isUpgradeFirst()) {
                do {
                    if (setUpgradeLock(state)) {
                        // CAS succeeded: we now own the upgrade lock; record ownership
                        // before bumping the reentrancy count.
                        mOwner = locker;
                        incrementUpgradeCount();
                        return Result.ACQUIRED;
                    }
                    // keep looping on CAS failure if a reader mucked with the state
                } while (((state = mState) & LOCK_STATE_MASK) == 0);
            }
        } else if (mOwner == locker) {
            // Reentrant acquisition: the caller already holds the lock, so just
            // increment the count rather than re-acquiring.
            incrementUpgradeCount();
            return Result.OWNED;
        }
        // Lock is held by someone else, or the CAS loop lost to a writer/upgrader.
        return Result.FAILED;
    }
}
public class RunJobFlowRequest { /** * A list of bootstrap actions to run before Hadoop starts on the cluster nodes . * @ return A list of bootstrap actions to run before Hadoop starts on the cluster nodes . */ public java . util . List < BootstrapActionConfig > getBootstrapActions ( ) { } }
if ( bootstrapActions == null ) { bootstrapActions = new com . amazonaws . internal . SdkInternalList < BootstrapActionConfig > ( ) ; } return bootstrapActions ;
public class ReflectionUtils { /** * Obtain the primitive type for the given wrapper type . * @ param wrapperType The primitive type * @ return The wrapper type */ public static Class getPrimitiveType ( Class wrapperType ) { } }
Class < ? > wrapper = WRAPPER_TO_PRIMITIVE . get ( wrapperType ) ; if ( wrapper != null ) { return wrapper ; } return wrapperType ;
public class BasicUserProfile { /** * Add attributes . * @ param attributes use attributes */ public void addAttributes ( final Map < String , Object > attributes ) { } }
if ( attributes != null ) { for ( final Map . Entry < String , Object > entry : attributes . entrySet ( ) ) { addAttribute ( entry . getKey ( ) , entry . getValue ( ) ) ; } }
public class WebFragmentDescriptorImpl { /** * If not already created , a new < code > data - source < / code > element will be created and returned . * Otherwise , the first existing < code > data - source < / code > element will be returned . * @ return the instance defined for the element < code > data - source < / code > */ public DataSourceType < WebFragmentDescriptor > getOrCreateDataSource ( ) { } }
List < Node > nodeList = model . get ( "data-source" ) ; if ( nodeList != null && nodeList . size ( ) > 0 ) { return new DataSourceTypeImpl < WebFragmentDescriptor > ( this , "data-source" , model , nodeList . get ( 0 ) ) ; } return createDataSource ( ) ;