signature
stringlengths
43
39.1k
implementation
stringlengths
0
450k
public class DescribeCompanyNetworkConfigurationRequestMarshaller { /** * Marshall the given parameter object . */ public void marshall ( DescribeCompanyNetworkConfigurationRequest describeCompanyNetworkConfigurationRequest , ProtocolMarshaller protocolMarshaller ) { } }
if ( describeCompanyNetworkConfigurationRequest == null ) { throw new SdkClientException ( "Invalid argument passed to marshall(...)" ) ; } try { protocolMarshaller . marshall ( describeCompanyNetworkConfigurationRequest . getFleetArn ( ) , FLEETARN_BINDING ) ; } catch ( Exception e ) { throw new SdkClientException ( "Unable to marshall request to JSON: " + e . getMessage ( ) , e ) ; }
public class RepositoryBrowsers { /** * Only returns those { @ link RepositoryBrowser } descriptors that extend from the given type . */ public static List < Descriptor < RepositoryBrowser < ? > > > filter ( Class < ? extends RepositoryBrowser > t ) { } }
List < Descriptor < RepositoryBrowser < ? > > > r = new ArrayList < > ( ) ; for ( Descriptor < RepositoryBrowser < ? > > d : RepositoryBrowser . all ( ) ) if ( d . isSubTypeOf ( t ) ) r . add ( d ) ; return r ;
// Review notes (code reproduced verbatim; only comments added):
// Stages the job's resources (-files, -libjars, -archives and the job jar)
// onto the JobTracker's filesystem under uploadFileDir, optionally through a
// shared, MD5-keyed distributed cache (when 'shared' is true), then records
// file lengths / timestamps into the job configuration via DistributedCache
// setters so tasks can validate cached copies.
public class JobClient { /** * configure the jobconf of the user with the command line options of * - libjars , - files , - archives * @ param conf * @ throws IOException */ private void copyAndConfigureFiles ( JobConf job , Path uploadFileDir , boolean shared ) throws IOException { } }
// Implementation: warns when GenericOptionsParser was not used; reads the
// tmpfiles/tmpjars/tmparchives properties; creates files/archives/libjars
// subdirectories with JOB_DIR_PERMISSION; copies (or cache-shares) the job
// jar and each listed resource; finally writes shared lengths and cache
// timestamps back into the JobConf and calls configureUserName(job).
// NOTE(review): 'shared' jar upload keys on the MD5 of the local jar —
// presumably to deduplicate identical jars across jobs; verify with
// copyRemoteFiles before relying on that behavior.
if ( ! ( job . getBoolean ( "mapred.used.genericoptionsparser" , false ) ) ) { LOG . warn ( "Use GenericOptionsParser for parsing the arguments. " + "Applications should implement Tool for the same." ) ; } // get all the command line arguments into the // jobconf passed in by the user conf String files = job . get ( "tmpfiles" ) ; String libjars = job . get ( "tmpjars" ) ; String archives = job . get ( "tmparchives" ) ; // Figure out what fs the JobTracker is using . Copy the // job to it , under a temporary name . This allows DFS to work , // and under the local fs also provides UNIX - like object loading // semantics . ( that is , if the job file is deleted right after // submission , we can still run the submission to completion ) // Create a number of filenames in the JobTracker ' s fs namespace FileSystem fs = getFs ( ) ; LOG . debug ( "default FileSystem: " + fs . getUri ( ) ) ; // We know file system of distributed cache , initialize index if ( filesInCache == null ) { filesInCache = new DistributedCacheIndex ( fs ) ; } uploadFileDir = fs . makeQualified ( uploadFileDir ) ; uploadFileDir = new Path ( uploadFileDir . toUri ( ) . getPath ( ) ) ; FsPermission mapredSysPerms = new FsPermission ( JOB_DIR_PERMISSION ) ; if ( ! fs . exists ( uploadFileDir ) ) { FileSystem . mkdirs ( fs , uploadFileDir , mapredSysPerms ) ; } Path filesDir = new Path ( uploadFileDir , "files" ) ; Path archivesDir = new Path ( uploadFileDir , "archives" ) ; Path libjarsDir = new Path ( uploadFileDir , "libjars" ) ; short replication = ( short ) job . getInt ( "mapred.submit.replication" , 10 ) ; fileInfo = new HashMap < URI , FileInfo > ( ) ; String originalJar = job . getJar ( ) ; if ( originalJar != null ) { // use jar name if job is not named . if ( "" . equals ( job . getJobName ( ) ) ) { job . setJobName ( new Path ( originalJar ) . getName ( ) ) ; } Path uploadJarPath ; Path originalJarPath = new Path ( originalJar ) ; originalJarPath = originalJarPath . 
makeQualified ( FileSystem . getLocal ( job ) ) ; try { // If sharing is turned on , we load the job jar into the distributed // cache if ( shared ) { if ( ! fs . exists ( libjarsDir ) ) { FileSystem . mkdirs ( fs , libjarsDir , mapredSysPerms ) ; } MD5Hash md5hash = MD5Hash . digest ( new FileInputStream ( originalJarPath . toUri ( ) . getPath ( ) ) ) ; uploadJarPath = copyRemoteFiles ( fs , libjarsDir , originalJarPath , job , replication , md5hash . toString ( ) ) ; URI pathURI = new URI ( uploadJarPath . toUri ( ) . toString ( ) ) ; DistributedCache . addSharedArchiveToClassPath ( uploadJarPath , job ) ; fileInfo . put ( pathURI , new FileInfo ( md5hash . toString ( ) , md5hash . getFileLength ( ) , 0 ) ) ; } else { // Otherwise we copy jar to JT ' s filesystem uploadJarPath = new Path ( uploadFileDir , "job.jar" ) ; fs . copyFromLocalFile ( originalJarPath , uploadJarPath ) ; } } catch ( URISyntaxException ue ) { // should not throw an uri exception throw new IOException ( "Failed to create uri for " + originalJar ) ; } job . setJar ( uploadJarPath . toString ( ) ) ; fs . setReplication ( uploadJarPath , replication ) ; try { fs . setPermission ( uploadJarPath , new FsPermission ( JOB_FILE_PERMISSION ) ) ; } catch ( IOException ioe ) { LOG . warn ( "Unable to set job jar permission" ) ; } } else { LOG . warn ( "No job jar file set. User classes may not be found. " + "See JobConf(Class) or JobConf#setJar(String)." ) ; } // add all the command line files / jars and archive // first copy them to jobtrackers filesystem if ( files != null ) { if ( ! fs . exists ( filesDir ) ) { FileSystem . mkdirs ( fs , filesDir , mapredSysPerms ) ; } String [ ] fileArr = files . split ( "," ) ; for ( String tmpFile : fileArr ) { Path tmp = new Path ( tmpFile ) ; Path newPath ; FileStatus fStatus = null ; MD5Hash md5hash = null ; try { if ( shared ) { md5hash = MD5Hash . digest ( new FileInputStream ( tmp . toUri ( ) . 
getPath ( ) ) ) ; newPath = copyRemoteFiles ( fs , filesDir , tmp , job , replication , md5hash . toString ( ) ) ; URI pathURI = new URI ( newPath . toUri ( ) . toString ( ) + "#" + newPath . getName ( ) ) ; DistributedCache . addSharedCacheFile ( pathURI , job ) ; fileInfo . put ( pathURI , new FileInfo ( md5hash . toString ( ) , md5hash . getFileLength ( ) , 0 ) ) ; } else { newPath = copyRemoteFiles ( fs , filesDir , tmp , job , replication ) ; fStatus = DistributedCache . getFileStatus ( job , newPath . toUri ( ) ) ; URI pathURI = new URI ( newPath . toUri ( ) . toString ( ) + "#" + newPath . getName ( ) ) ; DistributedCache . addCacheFile ( pathURI , job ) ; fileInfo . put ( pathURI , new FileInfo ( null , fStatus . getLen ( ) , fStatus . getModificationTime ( ) ) ) ; } } catch ( URISyntaxException ue ) { // should not throw a uri exception throw new IOException ( "Failed to create uri for " + tmpFile ) ; } DistributedCache . createSymlink ( job ) ; } } if ( libjars != null ) { if ( ! fs . exists ( libjarsDir ) ) { FileSystem . mkdirs ( fs , libjarsDir , mapredSysPerms ) ; } String [ ] libjarsArr = libjars . split ( "," ) ; for ( String tmpjars : libjarsArr ) { Path tmp = new Path ( tmpjars ) ; Path newPath ; if ( shared ) { MD5Hash md5hash = MD5Hash . digest ( new FileInputStream ( tmp . toUri ( ) . getPath ( ) ) ) ; newPath = copyRemoteFiles ( fs , libjarsDir , tmp , job , replication , md5hash . toString ( ) ) ; DistributedCache . addSharedArchiveToClassPath ( newPath , job ) ; fileInfo . put ( newPath . makeQualified ( newPath . getFileSystem ( job ) ) . toUri ( ) , new FileInfo ( md5hash . toString ( ) , md5hash . getFileLength ( ) , 0 ) ) ; } else { newPath = copyRemoteFiles ( fs , libjarsDir , tmp , job , replication ) ; DistributedCache . addArchiveToClassPath ( newPath , job ) ; FileStatus fStatus = DistributedCache . getFileStatus ( job , newPath . toUri ( ) ) ; fileInfo . put ( newPath . makeQualified ( newPath . getFileSystem ( job ) ) . 
toUri ( ) , new FileInfo ( null , fStatus . getLen ( ) , fStatus . getModificationTime ( ) ) ) ; } } } if ( archives != null ) { if ( ! fs . exists ( archivesDir ) ) { FileSystem . mkdirs ( fs , archivesDir , mapredSysPerms ) ; } String [ ] archivesArr = archives . split ( "," ) ; for ( String tmpArchives : archivesArr ) { Path tmp = new Path ( tmpArchives ) ; Path newPath ; MD5Hash md5hash = null ; FileStatus fStatus = null ; try { if ( shared ) { md5hash = MD5Hash . digest ( new FileInputStream ( tmp . toUri ( ) . getPath ( ) ) ) ; newPath = copyRemoteFiles ( fs , archivesDir , tmp , job , replication , md5hash . toString ( ) ) ; URI pathURI = new URI ( newPath . toUri ( ) . toString ( ) + "#" + newPath . getName ( ) ) ; DistributedCache . addSharedCacheArchive ( pathURI , job ) ; fileInfo . put ( pathURI , new FileInfo ( md5hash . toString ( ) , md5hash . getFileLength ( ) , 0 ) ) ; } else { newPath = copyRemoteFiles ( fs , archivesDir , tmp , job , replication ) ; fStatus = DistributedCache . getFileStatus ( job , newPath . toUri ( ) ) ; URI pathURI = new URI ( newPath . toUri ( ) . toString ( ) + "#" + newPath . getName ( ) ) ; DistributedCache . addCacheArchive ( pathURI , job ) ; fileInfo . put ( pathURI , new FileInfo ( null , fStatus . getLen ( ) , fStatus . getModificationTime ( ) ) ) ; } } catch ( URISyntaxException ue ) { // should not throw an uri excpetion throw new IOException ( "Failed to create uri for " + tmpArchives ) ; } DistributedCache . createSymlink ( job ) ; } } // set the timestamps and md5 of the archives and files URI [ ] tarchives = DistributedCache . getSharedCacheArchives ( job ) ; if ( tarchives != null ) { StringBuffer archiveLength = new StringBuffer ( ) ; FileStatus fStatus ; FileInfo info ; long fileLength ; for ( int i = 0 ; i < tarchives . length ; i ++ ) { if ( i != 0 ) archiveLength . append ( ',' ) ; info = fileInfo . get ( tarchives [ i ] ) ; if ( info == null ) { fStatus = DistributedCache . 
getFileStatus ( job , tarchives [ i ] ) ; fileLength = fStatus . getLen ( ) ; } else { fileLength = info . fileLength ; } archiveLength . append ( fileLength ) ; } DistributedCache . setSharedArchiveLength ( job , archiveLength . toString ( ) ) ; } URI [ ] tfiles = DistributedCache . getSharedCacheFiles ( job ) ; if ( tfiles != null ) { StringBuffer fileLength = new StringBuffer ( ) ; FileStatus fStatus ; FileInfo info ; long len ; for ( int i = 0 ; i < tfiles . length ; i ++ ) { if ( i != 0 ) fileLength . append ( ',' ) ; info = fileInfo . get ( tfiles [ i ] ) ; if ( info == null ) { fStatus = DistributedCache . getFileStatus ( job , tfiles [ i ] ) ; len = fStatus . getLen ( ) ; } else { len = info . fileLength ; } fileLength . append ( len ) ; } DistributedCache . setSharedFileLength ( job , fileLength . toString ( ) ) ; } tarchives = DistributedCache . getCacheArchives ( job ) ; if ( tarchives != null ) { StringBuffer archiveTimestamps = new StringBuffer ( ) ; FileInfo info ; long timeStamp ; for ( int i = 0 ; i < tarchives . length ; i ++ ) { if ( i != 0 ) archiveTimestamps . append ( ',' ) ; info = fileInfo . get ( tarchives [ i ] ) ; if ( info == null ) { timeStamp = DistributedCache . getTimestamp ( job , tarchives [ i ] ) ; } else { timeStamp = info . timeStamp ; } archiveTimestamps . append ( timeStamp ) ; } DistributedCache . setArchiveTimestamps ( job , archiveTimestamps . toString ( ) ) ; } tfiles = DistributedCache . getCacheFiles ( job ) ; if ( tfiles != null ) { StringBuffer fileTimestamps = new StringBuffer ( ) ; FileInfo info ; long timeStamp ; for ( int i = 0 ; i < tfiles . length ; i ++ ) { if ( i != 0 ) fileTimestamps . append ( ',' ) ; info = fileInfo . get ( tfiles [ i ] ) ; if ( info == null ) { timeStamp = DistributedCache . getTimestamp ( job , tfiles [ i ] ) ; } else { timeStamp = info . timeStamp ; } fileTimestamps . append ( timeStamp ) ; } DistributedCache . setFileTimestamps ( job , fileTimestamps . 
toString ( ) ) ; } configureUserName ( job ) ;
public class ProcessedInput { /** * Returns position ( order ) of specified parameter name * @ param parameterName parameter name which would be searched * @ return position of parameter , null if it wasn ' t found in list of parameter names */ public Integer getPosition ( String parameterName ) { } }
Integer position = null ; for ( int i = 0 ; i < this . sqlParameterNames . size ( ) ; i ++ ) { if ( this . sqlParameterNames . get ( i ) . equals ( parameterName ) == true ) { position = i ; break ; } } return position ;
public class Segment { /** * Returns the index of the first clustering having an unpaired cluster , or - 1 * no unpaired cluster exists . * @ return clustering id or - 1 */ public int getUnpairedClusteringIndex ( ) { } }
for ( int index = 0 ; index < clusterIds . length ; index ++ ) { if ( clusterIds [ index ] == UNCLUSTERED ) { return index ; } } return - 1 ;
// Review notes (code reproduced verbatim; only comments added):
// Appends one fixed-length key/value record into the backing MemoryBlock and
// returns an UnsafeRow over the value portion, or null when the batch is full.
public class FixedLengthRowBasedKeyValueBatch { /** * Append a key value pair . * It copies data into the backing MemoryBlock . * Returns an UnsafeRow pointing to the value if succeeds , otherwise returns null . */ @ Override public UnsafeRow appendRow ( Object kbase , long koff , int klen , Object vbase , long voff , int vlen ) { } }
// Layout per record: [key bytes][value bytes][trailing long 0], with the
// cursor advanced by the fixed recordLength. keyRow/valueRow are re-pointed
// at the freshly copied bytes, and keyRowId tracks the row the keyRow views.
// NOTE(review): correctness assumes recordLength >= klen + vlen + 8 (the
// trailing long) — confirm against the class's field initialization.
// if run out of max supported rows or page size , return null if ( numRows >= capacity || page == null || page . size ( ) - pageCursor < recordLength ) { return null ; } long offset = page . getBaseOffset ( ) + pageCursor ; final long recordOffset = offset ; Platform . copyMemory ( kbase , koff , base , offset , klen ) ; offset += klen ; Platform . copyMemory ( vbase , voff , base , offset , vlen ) ; offset += vlen ; Platform . putLong ( base , offset , 0 ) ; pageCursor += recordLength ; keyRowId = numRows ; keyRow . pointTo ( base , recordOffset , klen ) ; valueRow . pointTo ( base , recordOffset + klen , vlen ) ; numRows ++ ; return valueRow ;
public class BigQueryTableHelper { /** * Creates { @ link TableSchema } from the JSON representation of the table fields . * @ param fieldsJson JSON fields to convert to { @ link TableSchema } * @ return { @ link TableSchema } * @ throws IOException */ static TableSchema createTableSchemaFromFields ( String fieldsJson ) throws IOException { } }
List < TableFieldSchema > fields = new ArrayList < > ( ) ; JsonParser parser = JacksonFactory . getDefaultInstance ( ) . createJsonParser ( fieldsJson ) ; parser . parseArrayAndClose ( fields , TableFieldSchema . class ) ; return new TableSchema ( ) . setFields ( fields ) ;
public class Utils {

    /**
     * Replies if the given type is a final type.
     *
     * @param expressionType - the type to test.
     * @return <code>true</code> if the given type is final.
     */
    public static boolean isFinal(Class<?> expressionType) {
        // An array type is as final as its ultimate component type,
        // so unwrap arrays iteratively instead of recursing.
        Class<?> current = expressionType;
        while (current.isArray()) {
            current = current.getComponentType();
        }
        // Primitives and enums are always effectively final.
        if (current.isPrimitive() || current.isEnum()) {
            return true;
        }
        return Modifier.isFinal(current.getModifiers());
    }
}
public class BatchDeleteScheduledActionResult { /** * The names of the scheduled actions that could not be deleted , including an error message . * < b > NOTE : < / b > This method appends the values to the existing list ( if any ) . Use * { @ link # setFailedScheduledActions ( java . util . Collection ) } or * { @ link # withFailedScheduledActions ( java . util . Collection ) } if you want to override the existing values . * @ param failedScheduledActions * The names of the scheduled actions that could not be deleted , including an error message . * @ return Returns a reference to this object so that method calls can be chained together . */ public BatchDeleteScheduledActionResult withFailedScheduledActions ( FailedScheduledUpdateGroupActionRequest ... failedScheduledActions ) { } }
if ( this . failedScheduledActions == null ) { setFailedScheduledActions ( new com . amazonaws . internal . SdkInternalList < FailedScheduledUpdateGroupActionRequest > ( failedScheduledActions . length ) ) ; } for ( FailedScheduledUpdateGroupActionRequest ele : failedScheduledActions ) { this . failedScheduledActions . add ( ele ) ; } return this ;
public class JsonParser {

    /**
     * Throws a helpful exception based on the current alphanumeric token.
     */
    private JsonParserException createHelpfulException(char first, char[] expected, int failurePosition) throws JsonParserException {
        // Build the first part of the token: the char already consumed plus
        // however much of the expected literal matched before failing.
        StringBuilder errorToken = new StringBuilder();
        errorToken.append(first);
        if (expected != null) {
            errorToken.append(new String(expected, 0, failurePosition));
        }
        // Consume the whole pseudo-token (capped at 15 chars) to make a
        // better error message.
        while (isAsciiLetter(peekChar()) && errorToken.length() < 15) {
            errorToken.append((char) advanceChar());
        }
        String suggestion = (expected == null)
                ? ""
                : ". Did you mean '" + first + new String(expected) + "'?";
        return createParseException(null, "Unexpected token '" + errorToken + "'" + suggestion, true);
    }
}
public class SupplierUtils {

    /**
     * Returns a composed function that first executes the Supplier and optionally recovers from an exception.
     *
     * @param <T> return type of after
     * @param supplier the supplier to decorate
     * @param exceptionHandler the exception handler
     * @return a function composed of supplier and exceptionHandler
     */
    public static <T> Supplier<T> recover(Supplier<T> supplier, Function<Exception, T> exceptionHandler) {
        return () -> invokeWithRecovery(supplier, exceptionHandler);
    }

    // Runs the supplier once, mapping any thrown Exception through the handler.
    private static <T> T invokeWithRecovery(Supplier<T> supplier, Function<Exception, T> handler) {
        try {
            return supplier.get();
        } catch (Exception ex) {
            return handler.apply(ex);
        }
    }
}
public class RouteTablesInner { /** * Gets all route tables in a resource group . * @ param resourceGroupName The name of the resource group . * @ throws IllegalArgumentException thrown if parameters fail the validation * @ return the observable to the PagedList & lt ; RouteTableInner & gt ; object */ public Observable < Page < RouteTableInner > > listByResourceGroupAsync ( final String resourceGroupName ) { } }
return listByResourceGroupWithServiceResponseAsync ( resourceGroupName ) . map ( new Func1 < ServiceResponse < Page < RouteTableInner > > , Page < RouteTableInner > > ( ) { @ Override public Page < RouteTableInner > call ( ServiceResponse < Page < RouteTableInner > > response ) { return response . body ( ) ; } } ) ;
public class BuildDatabase { /** * Checks if a topic is unique in the database . * @ param topic The Topic to be checked to see if it ' s unique . * @ return True if the topic exists in the database and it is unique , otherwise false . */ public boolean isUniqueSpecTopic ( final SpecTopic topic ) { } }
return topics . containsKey ( topic . getDBId ( ) ) ? topics . get ( topic . getDBId ( ) ) . size ( ) == 1 : false ;
public class FeedItem { /** * Sets the geoTargetingRestriction value for this FeedItem . * @ param geoTargetingRestriction * Geo targeting restriction specifies the type of location that * can be used for targeting . * Only applies if the feed item has a location FeedItemTarget . * On update , if the field is left unspecified , the previous geo targeting * restriction state * will not be changed . * On update , if the field is set with a null GeoRestriction * enum , the geo targeting restriction * will be cleared . */ public void setGeoTargetingRestriction ( com . google . api . ads . adwords . axis . v201809 . cm . FeedItemGeoRestriction geoTargetingRestriction ) { } }
// Plain setter: stores the (possibly null) restriction; per the javadoc above,
// a null value clears the restriction on update.
this . geoTargetingRestriction = geoTargetingRestriction ;
public class IRI { /** * Determines if this IRI is absolute * @ return { @ code true } if this IRI is absolute or { @ code false } if this IRI * is not absolute */ public boolean isAbsolute ( ) { } }
int colonIndex = namespace . indexOf ( ':' ) ; if ( colonIndex == - 1 ) { return false ; } for ( int i = 0 ; i < colonIndex ; i ++ ) { char ch = namespace . charAt ( i ) ; if ( ! Character . isLetter ( ch ) && ! Character . isDigit ( ch ) && ch != '.' && ch != '+' && ch != '-' ) { return false ; } } return true ;
// Review notes (code reproduced verbatim; only comments added):
// Evaluates a compiled Groovy class against a JSR-223 ScriptContext.
// - Seeds the context once with "context" and "out" attributes, then
//   (GROOVY-3669 fix) refreshes "out" on every call and removes both
//   attributes again in the finally block.
// - The anonymous Binding routes variable reads/writes through the
//   ScriptContext so globals live in the context, not in the Binding's map.
// - A non-Script class is returned as-is; a Script gets its public methods
//   registered in globalClosures and a DelegatingMetaClass that falls back
//   to callGlobal() on MissingMethodException, giving the illusion of one
//   shared global scope across eval calls.
public class GroovyScriptEngineImpl { /** * package - privates */ Object eval ( Class scriptClass , final ScriptContext ctx ) throws ScriptException { } }
// Bindings so script has access to this environment . // Only initialize once . if ( null == ctx . getAttribute ( "context" , ScriptContext . ENGINE_SCOPE ) ) { // add context to bindings ctx . setAttribute ( "context" , ctx , ScriptContext . ENGINE_SCOPE ) ; // direct output to ctx . getWriter // If we ' re wrapping with a PrintWriter here , // enable autoFlush because otherwise it might not get done ! final Writer writer = ctx . getWriter ( ) ; ctx . setAttribute ( "out" , ( writer instanceof PrintWriter ) ? writer : new PrintWriter ( writer , true ) , ScriptContext . ENGINE_SCOPE ) ; // Not going to do this after all ( at least for now ) . // Scripts can use context . { reader , writer , errorWriter } . // That is a modern version of System . { in , out , err } or Console . { reader , writer } ( ) . // / / New I / O names consistent with ScriptContext and java . io . Console . // ctx . setAttribute ( " writer " , writer , ScriptContext . ENGINE _ SCOPE ) ; // / / Direct errors to ctx . getErrorWriter // final Writer errorWriter = ctx . getErrorWriter ( ) ; // ctx . setAttribute ( " errorWriter " , ( errorWriter instanceof PrintWriter ) ? // errorWriter : // new PrintWriter ( errorWriter ) , // ScriptContext . ENGINE _ SCOPE ) ; // / / Get input from ctx . getReader // / / We don ' t wrap with BufferedReader here because we expect that if // / / the host wants that they do it . Either way Groovy scripts will // / / always have readLine because the GDK supplies it for Reader . // ctx . setAttribute ( " reader " , ctx . getReader ( ) , ScriptContext . ENGINE _ SCOPE ) ; } // Fix for GROOVY - 3669 : Can ' t use several times the same JSR - 223 ScriptContext for differents groovy script if ( ctx . getWriter ( ) != null ) { ctx . setAttribute ( "out" , new PrintWriter ( ctx . getWriter ( ) , true ) , ScriptContext . ENGINE_SCOPE ) ; } /* * We use the following Binding instance so that global variable lookup * will be done in the current ScriptContext instance . 
*/ Binding binding = new Binding ( ctx . getBindings ( ScriptContext . ENGINE_SCOPE ) ) { @ Override public Object getVariable ( String name ) { synchronized ( ctx ) { int scope = ctx . getAttributesScope ( name ) ; if ( scope != - 1 ) { return ctx . getAttribute ( name , scope ) ; } } throw new MissingPropertyException ( name , getClass ( ) ) ; } @ Override public void setVariable ( String name , Object value ) { synchronized ( ctx ) { int scope = ctx . getAttributesScope ( name ) ; if ( scope == - 1 ) { scope = ScriptContext . ENGINE_SCOPE ; } ctx . setAttribute ( name , value , scope ) ; } } } ; try { // if this class is not an instance of Script , it ' s a full - blown class // then simply return that class if ( ! Script . class . isAssignableFrom ( scriptClass ) ) { return scriptClass ; } else { // it ' s a script Script scriptObject = InvokerHelper . createScript ( scriptClass , binding ) ; // save all current closures into global closures map Method [ ] methods = scriptClass . getMethods ( ) ; for ( Method m : methods ) { String name = m . getName ( ) ; globalClosures . put ( name , new MethodClosure ( scriptObject , name ) ) ; } MetaClass oldMetaClass = scriptObject . getMetaClass ( ) ; /* * We override the MetaClass of this script object so that we can * forward calls to global closures ( of previous or future " eval " calls ) * This gives the illusion of working on the same " global " scope . */ scriptObject . setMetaClass ( new DelegatingMetaClass ( oldMetaClass ) { @ Override public Object invokeMethod ( Object object , String name , Object args ) { if ( args == null ) { return invokeMethod ( object , name , MetaClassHelper . EMPTY_ARRAY ) ; } if ( args instanceof Tuple ) { return invokeMethod ( object , name , ( ( Tuple ) args ) . 
toArray ( ) ) ; } if ( args instanceof Object [ ] ) { return invokeMethod ( object , name , ( Object [ ] ) args ) ; } else { return invokeMethod ( object , name , new Object [ ] { args } ) ; } } @ Override public Object invokeMethod ( Object object , String name , Object [ ] args ) { try { return super . invokeMethod ( object , name , args ) ; } catch ( MissingMethodException mme ) { return callGlobal ( name , args , ctx ) ; } } @ Override public Object invokeStaticMethod ( Object object , String name , Object [ ] args ) { try { return super . invokeStaticMethod ( object , name , args ) ; } catch ( MissingMethodException mme ) { return callGlobal ( name , args , ctx ) ; } } } ) ; return scriptObject . run ( ) ; } } catch ( Exception e ) { throw new ScriptException ( e ) ; } finally { // Fix for GROOVY - 3669 : Can ' t use several times the same JSR - 223 ScriptContext for different groovy script // Groovy ' s scripting engine implementation adds those two variables in the binding // but should clean up afterwards ctx . removeAttribute ( "context" , ScriptContext . ENGINE_SCOPE ) ; ctx . removeAttribute ( "out" , ScriptContext . ENGINE_SCOPE ) ; }
public class Code { /** * Registers { @ code catchClause } as a branch target for all instructions * in this frame that throw a class assignable to { @ code toCatch } . This * includes methods invoked from this frame . Deregister the clause using * { @ link # removeCatchClause removeCatchClause ( ) } . It is an error to * register a catch clause without also { @ link # mark marking it } in the same * { @ code Code } instance . */ public void addCatchClause ( TypeId < ? extends Throwable > toCatch , Label catchClause ) { } }
// Review notes (code unchanged): rejects a duplicate catch type up front,
// adopts the label into this Code, then appends the type/label pair in
// matching positions; 'catches' is a derived type list rebuilt after the add
// so it stays in sync with catchTypes.
if ( catchTypes . contains ( toCatch ) ) { throw new IllegalArgumentException ( "Already caught: " + toCatch ) ; } adopt ( catchClause ) ; catchTypes . add ( toCatch ) ; catches = toTypeList ( catchTypes ) ; catchLabels . add ( catchClause ) ;
public class DivOpAxis { /** * { @ inheritDoc } */ @ Override public AtomicValue operate ( final AtomicValue mOperand1 , final AtomicValue mOperand2 ) throws TTXPathException { } }
// Review notes (code unchanged):
// - Chooses the result type from both operand type keys, then divides.
// - NOTE(review): the DECIMAL/FLOAT/DOUBLE branch maps a ZERO FIRST operand
//   (the dividend) to NaN; XPath 'div' on doubles normally yields 0 for
//   0 div x and only produces INF/NaN for division BY zero — confirm this
//   special-case is intentional.
// - INTEGER division relies on the JVM's ArithmeticException for x idiv 0
//   and rethrows it as XPath error FOAR0001.
// - NOTE(review): the duration branches throw a message saying "Add operator"
//   although this axis implements division — likely a copy/paste slip in the
//   message text.
final Type returnType = getReturnType ( mOperand1 . getTypeKey ( ) , mOperand2 . getTypeKey ( ) ) ; final int typeKey = NamePageHash . generateHashForString ( returnType . getStringRepr ( ) ) ; final byte [ ] value ; switch ( returnType ) { case DECIMAL : case FLOAT : case DOUBLE : final double aD = Double . parseDouble ( new String ( mOperand1 . getRawValue ( ) ) ) ; final double dValue ; if ( aD == 0.0 || aD == - 0.0 ) { dValue = Double . NaN ; } else { dValue = aD / Double . parseDouble ( new String ( mOperand2 . getRawValue ( ) ) ) ; } value = TypedValue . getBytes ( dValue ) ; return new AtomicValue ( value , typeKey ) ; case INTEGER : try { final int iValue = ( int ) Double . parseDouble ( new String ( mOperand1 . getRawValue ( ) ) ) / ( int ) Double . parseDouble ( new String ( mOperand2 . getRawValue ( ) ) ) ; value = TypedValue . getBytes ( iValue ) ; return new AtomicValue ( value , typeKey ) ; } catch ( final ArithmeticException e ) { throw new XPathError ( ErrorType . FOAR0001 ) ; } case YEAR_MONTH_DURATION : case DAY_TIME_DURATION : throw new IllegalStateException ( "Add operator is not implemented for the type " + returnType . getStringRepr ( ) + " yet." ) ; default : throw new XPathError ( ErrorType . XPTY0004 ) ; }
public class AbstractConfiguration { /** * Gets the value of the configuration property identified by name as a value of the specified Class type . * The property is required to be declared and defined otherwise a ConfigurationException is thrown . * @ param propertyName a String value indicating the name of the configuration property . * @ param type the expected Class type of the configuration property value . * @ return the value of the configuration property identified by name . * @ throws ConfigurationException if the property value was undeclared or is undefined . */ public < T > T getPropertyValueAs ( final String propertyName , final Class < T > type ) { } }
// Delegates to getPropertyValue with the DEFAULT_REQUIRED flag (per the
// javadoc: the property must be declared and defined), then converts the
// raw value to the requested type.
return convert ( getPropertyValue ( propertyName , DEFAULT_REQUIRED ) , type ) ;
public class SocksServerSocket { private void processReply ( ProxyMessage reply ) throws SocksException { } }
localPort = reply . port ; /* * If the server have assigned same host as it was contacted on * it might return an address of all zeros */ if ( reply . host . equals ( "0.0.0.0" ) ) { localIP = proxy . proxyIP ; localHost = localIP . getHostName ( ) ; } else { localHost = reply . host ; localIP = reply . ip ; }
public class PRNGFixes { /** * Gets the hardware serial number of this device . * @ return serial number or { @ code null } if not available . */ private static String getDeviceSerialNumber ( ) { } }
// We ' re using the Reflection API because Build . SERIAL is only available // since API Level 9 ( Gingerbread , Android 2.3 ) . try { return ( String ) Build . class . getField ( "SERIAL" ) . get ( null ) ; } catch ( Exception ignored ) { return null ; }
public class ApiOvhCloud { /** * Alter an instance * REST : PUT / cloud / project / { serviceName } / instance / { instanceId } * @ param instanceId [ required ] Instance id * @ param instanceName [ required ] Instance new name * @ param serviceName [ required ] Service name */ public void project_serviceName_instance_instanceId_PUT ( String serviceName , String instanceId , String instanceName ) throws IOException { } }
String qPath = "/cloud/project/{serviceName}/instance/{instanceId}" ; StringBuilder sb = path ( qPath , serviceName , instanceId ) ; HashMap < String , Object > o = new HashMap < String , Object > ( ) ; addBody ( o , "instanceName" , instanceName ) ; exec ( qPath , "PUT" , sb . toString ( ) , o ) ;
public class MoleculeBuilder { /** * Generate an Alkane ( chain of carbons with no hydrogens ) of a given length . * < p > This method was written by Stephen Tomkinson . * @ param chainLength The number of carbon atoms to have in the chain . * @ return A molecule containing a bonded chain of carbons . * @ cdk . created 2003-08-15 */ private static IAtomContainer makeAlkane ( int chainLength ) { } }
IAtomContainer currentChain = new AtomContainer ( ) ; // Add the initial atom currentChain . addAtom ( new Atom ( "C" ) ) ; // Add further atoms and bonds as needed , a pair at a time . for ( int atomCount = 1 ; atomCount < chainLength ; atomCount ++ ) { currentChain . addAtom ( new Atom ( "C" ) ) ; currentChain . addBond ( atomCount , atomCount - 1 , IBond . Order . SINGLE ) ; } return currentChain ;
public class AmazonAlexaForBusinessClient { /** * Deletes a skill group by skill group ARN . * @ param deleteSkillGroupRequest * @ return Result of the DeleteSkillGroup operation returned by the service . * @ throws NotFoundException * The resource is not found . * @ throws ConcurrentModificationException * There is a concurrent modification of resources . * @ sample AmazonAlexaForBusiness . DeleteSkillGroup * @ see < a href = " http : / / docs . aws . amazon . com / goto / WebAPI / alexaforbusiness - 2017-11-09 / DeleteSkillGroup " * target = " _ top " > AWS API Documentation < / a > */ @ Override public DeleteSkillGroupResult deleteSkillGroup ( DeleteSkillGroupRequest request ) { } }
request = beforeClientExecution ( request ) ; return executeDeleteSkillGroup ( request ) ;
public class Router { /** * Specify a middleware that will be called for a matching HTTP OPTIONS * @ param regex A regular expression * @ param handlers The middleware to call */ public Router options ( @ NotNull final Pattern regex , @ NotNull final IMiddleware ... handlers ) { } }
// Registers the middleware chain under the OPTIONS verb for URLs matching
// the pattern, then returns this router so calls can be chained.
addRegEx ( "OPTIONS" , regex , handlers , optionsBindings ) ; return this ;
public class AstBinOp { /** * Produces a mapping array with indexes of the smaller pointing to the larger domain . * @ param longerDomain Domain to originally map from * @ param shorterDomain Domain to originally map to * @ return Cross - domain mapping as an array of primitive integers */ private int [ ] alignCategoricals ( String [ ] longerDomain , String [ ] shorterDomain ) { } }
String [ ] sortedLongerDomain = Arrays . copyOf ( longerDomain , longerDomain . length ) ; // Sort to make sure binary search is possible Arrays . sort ( sortedLongerDomain ) ; int [ ] transformedIndices = MemoryManager . malloc4 ( shorterDomain . length ) ; for ( int i = 0 ; i < shorterDomain . length ; i ++ ) { transformedIndices [ i ] = Arrays . binarySearch ( sortedLongerDomain , shorterDomain [ i ] ) ; } return transformedIndices ;
public class PropositionUtil { /** * Binary search for a primitive parameter by timestamp . * @ param list * a < code > List < / code > of < code > PrimitiveParameter < / code > * objects all with the same paramId , cannot be < code > null < / code > . * @ param tstamp * the timestamp we ' re interested in finding . * @ return a < code > PrimitiveParameter < / code > , or null if not found . */ private static int binarySearchMaxFinish ( List < ? extends TemporalProposition > params , long timestamp ) { } }
/* * The conditions for using index versus iterator are grabbed from the * JDK source code . */ if ( params . size ( ) < 5000 || params instanceof RandomAccess ) { return maxFinishIndexedBinarySearch ( params , timestamp ) ; } else { return maxFinishIteratorBinarySearch ( params , timestamp ) ; }
public class TruncatedNormalDistributionTypeImpl { /** * < ! - - begin - user - doc - - > * < ! - - end - user - doc - - > * @ generated */ @ Override public boolean eIsSet ( int featureID ) { } }
/* NOTE(review): EMF-generated feature dispatch. Each known feature ID delegates
   to the matching isSetX() accessor; unrecognized IDs fall through to super. */
switch ( featureID ) { case BpsimPackage . TRUNCATED_NORMAL_DISTRIBUTION_TYPE__MAX : return isSetMax ( ) ; case BpsimPackage . TRUNCATED_NORMAL_DISTRIBUTION_TYPE__MEAN : return isSetMean ( ) ; case BpsimPackage . TRUNCATED_NORMAL_DISTRIBUTION_TYPE__MIN : return isSetMin ( ) ; case BpsimPackage . TRUNCATED_NORMAL_DISTRIBUTION_TYPE__STANDARD_DEVIATION : return isSetStandardDeviation ( ) ; } return super . eIsSet ( featureID ) ;
public class SQLDatabaseQueue { /** * Submits a database task for execution in a transaction * @ param callable The task to be performed * @ param < T > The type of object that is returned from the task * @ throws RejectedExecutionException thrown when the queue has been shutdown * @ return Future representing the task to be executed . */ public < T > Future < T > submitTransaction ( SQLCallable < T > callable ) { } }
return this . submitTaskToQueue ( new SQLQueueCallable < T > ( db , callable , true ) ) ;
public class EJBApplicationMetaData {

    /**
     * Notification that the modules within the application have finished starting.
     * The caller is responsible for calling {@link #stopping} if this method fails.
     *
     * @throws RuntimeWarning if an error occurs while finishing the start
     */
    public void started() throws RuntimeWarning {
        // F743-26072
        if (TraceComponent.isAnyTracingEnabled() && tc.isDebugEnabled()) {
            Tr.debug(tc, "started");
        }
        finishStarting();
    }
}
public class Ifc4FactoryImpl { /** * < ! - - begin - user - doc - - > * < ! - - end - user - doc - - > * @ generated */ public IfcTaskTypeEnum createIfcTaskTypeEnumFromString ( EDataType eDataType , String initialValue ) { } }
IfcTaskTypeEnum result = IfcTaskTypeEnum . get ( initialValue ) ; if ( result == null ) throw new IllegalArgumentException ( "The value '" + initialValue + "' is not a valid enumerator of '" + eDataType . getName ( ) + "'" ) ; return result ;
public class PreferenceFragment { /** * Initializes the list dialog . */ private void initializeListDialog ( ) { } }
MaterialDialog . Builder builder = new MaterialDialog . Builder ( getActivity ( ) ) ; configureHeaderDialogBuilder ( builder ) ; configureButtonBarDialogBuilder ( builder ) ; builder . setItems ( R . array . list_items , createSingleChoiceListener ( ) ) ; listDialog = builder . create ( ) ;
public class WonderPushFirebaseMessagingService { /** * Helper method that will register a device for Google Cloud Messaging * notification and register the device token to WonderPush . This method is * called within { @ link WonderPush # initialize ( Context , String , String ) } . * @ param context * The current { @ link Activity } ( preferred ) or { @ link Application } context . */ protected static void registerForPushNotification ( Context context ) { } }
if ( checkPlayService ( context ) ) { WonderPushFirebaseMessagingService . fetchInstanceId ( ) ; } else { Log . w ( TAG , "Google Play Services not present. Check your setup. If on an emulator, use a Google APIs system image." ) ; }
public class SVNCommands { /** * Run " svn cleanup " on the given working copy . * @ param directory The local working directory * @ throws IOException Execution of the SVN sub - process failed or the * sub - process returned a exit value indicating a failure */ public static void cleanup ( File directory ) throws IOException { } }
log . info ( "Cleaning SVN Working copy at " + directory ) ; CommandLine cmdLine = new CommandLine ( SVN_CMD ) ; cmdLine . addArgument ( "cleanup" ) ; addDefaultArguments ( cmdLine , null , null ) ; try ( InputStream result = ExecutionHelper . getCommandResult ( cmdLine , directory , 0 , 360000 ) ) { log . info ( "Svn-Cleanup reported:\n" + extractResult ( result ) ) ; }
public class DBNumBase { /** * 項が日付の4桁の年かどうか * @ param term * @ return */ protected boolean is4YearTerm ( Term < ? > term ) { } }
if ( ! ( term instanceof DateTerm . YearTerm ) ) { return false ; } DateTerm . YearTerm yearTerm = ( DateTerm . YearTerm ) term ; if ( yearTerm . getFormat ( ) . length ( ) == 4 ) { return true ; } return false ;
public class NetworkServiceRecordAgent { /** * Updates a specific PhysicalNetworkFunctionRecord . * @ param idNsr the ID of the NetworkServiceRecord containing the PhysicalNetworkFunctionRecord * @ param idPnfr the ID of the PhysicalNetworkFunctionRecord to update * @ param physicalNetworkFunctionRecord the updated version of the PhysicalNetworkFunctionRecord * @ return the updated PhysicalNetworkFunctionRecord * @ throws SDKException if the request fails */ @ Help ( help = "Update the PhysicalNetworkFunctionRecord of a NetworkServiceRecord with specific id" ) public PhysicalNetworkFunctionRecord updatePNFD ( final String idNsr , final String idPnfr , final PhysicalNetworkFunctionRecord physicalNetworkFunctionRecord ) throws SDKException { } }
String url = idNsr + "/pnfrecords" + "/" + idPnfr ; return ( PhysicalNetworkFunctionRecord ) requestPut ( url , physicalNetworkFunctionRecord ) ;
public class Join {

    /**
     * A replacement for Java 8's String.join().
     *
     * @param sep the separator string
     * @param items the items to join
     * @return the string representation of the joined items; null items render as "null"
     */
    public static String join(final String sep, final Object... items) {
        final StringBuilder buf = new StringBuilder();
        boolean first = true;
        for (final Object item : items) {
            if (first) {
                first = false;
            } else {
                buf.append(sep);
            }
            // Fix: the original called item.toString(), which throws NPE for a
            // null element. append(Object) goes through String.valueOf and
            // renders null as "null", matching String.join-style utilities.
            buf.append(item);
        }
        return buf.toString();
    }
}
public class WSContext { /** * Move all the contents of the node and the node itself , as far as possible . * Do NOT move any external entries ( i . e . anything inserted directly into the * service registry ) , since we cannot actually remove these from the service * registry itself , and that store is what takes precedence . */ private boolean moveContents ( ContextNode oldNode , ContextNode newNode ) throws InvalidNameException , NameAlreadyBoundException , NameNotFoundException , NotContextException { } }
/* NOTE(review): walks oldNode's children via the entry-set iterator so removal
   is safe mid-loop. AutoBindNode children pin the old node (emptied=false);
   ServiceRegistration children are re-bound under newNode and removed; child
   contexts recurse and are removed only if fully emptied; anything else is
   logged and dropped. Returns whether oldNode ended up empty. */
boolean emptied = true ; Iterator < Entry < String , Object > > entries = oldNode . children . entrySet ( ) . iterator ( ) ; while ( entries . hasNext ( ) ) { Entry < String , Object > entry = entries . next ( ) ; WSName name = new WSName ( entry . getKey ( ) ) ; Object child = entry . getValue ( ) ; if ( child instanceof AutoBindNode ) { emptied = false ; } else if ( child instanceof ServiceRegistration ) { ServiceRegistration < ? > reg = ( ServiceRegistration < ? > ) child ; String className = ( String ) reg . getReference ( ) . getProperty ( JNDIServiceBinder . OSGI_JNDI_SERVICE_CLASS ) ; reg . setProperties ( createServiceProperties ( newNode . fullName . plus ( name ) , className ) ) ; newNode . bind ( name , reg ) ; entries . remove ( ) ; } else if ( child instanceof ContextNode ) { ContextNode oldChildNode = ( ContextNode ) child ; ContextNode newChildNode = newNode . createSubcontext ( name ) ; if ( moveContents ( oldChildNode , newChildNode ) ) // if it was emptied , remove the node // TODO - potential race condition here - need to prevent any additions during clean entries . remove ( ) ; else // if it wasn ' t emptied we need to keep its parent too emptied = false ; } else { // there shouldn ' t be any other types of object in there ! if ( tc . isDebugEnabled ( ) ) Tr . debug ( tc , "Unexpected object type found in internal tree" , oldNode . fullName + WSName . SEPARATOR + entry . getKey ( ) , child ) ; // remove it anyway , and don ' t copy it to the new location entries . remove ( ) ; } } return emptied ;
public class JLanguageTool {

    /**
     * Counts the number of '\n' characters in the given string.
     * Non-private only for test case.
     *
     * @param s the string to scan
     * @return the number of line breaks found
     */
    static int countLineBreaks(String s) {
        int breaks = 0;
        for (int i = 0; i < s.length(); i++) {
            if (s.charAt(i) == '\n') {
                breaks++;
            }
        }
        return breaks;
    }
}
public class ScriptRunner { /** * This is used to read the * . log file and manage any necessary * transaction rollback . */ public static void runScript ( Database database , String logFilename , int logType ) { } }
/* NOTE(review): replays a logged script statement-by-statement. Referential
   integrity is disabled for the duration and restored in the finally block.
   Sessions are re-created lazily per logged session number and cached in
   sessionMap; EOF ends replay normally, OutOfMemoryError aborts, and any other
   failure stops processing at the bad log line after logging it. */
IntKeyHashMap sessionMap = new IntKeyHashMap ( ) ; Session current = null ; int currentId = 0 ; database . setReferentialIntegrity ( false ) ; ScriptReaderBase scr = null ; String statement ; int statementType ; try { StopWatch sw = new StopWatch ( ) ; scr = ScriptReaderBase . newScriptReader ( database , logFilename , logType ) ; while ( scr . readLoggedStatement ( current ) ) { int sessionId = scr . getSessionNumber ( ) ; if ( current == null || currentId != sessionId ) { currentId = sessionId ; current = ( Session ) sessionMap . get ( currentId ) ; if ( current == null ) { current = database . getSessionManager ( ) . newSession ( database , database . getUserManager ( ) . getSysUser ( ) , false , true , 0 ) ; sessionMap . put ( currentId , current ) ; } } if ( current . isClosed ( ) ) { sessionMap . remove ( currentId ) ; continue ; } Result result = null ; statementType = scr . getStatementType ( ) ; switch ( statementType ) { case ScriptReaderBase . ANY_STATEMENT : statement = scr . getLoggedStatement ( ) ; result = current . executeDirectStatement ( statement ) ; if ( result != null && result . isError ( ) ) { if ( result . getException ( ) != null ) { throw result . getException ( ) ; } throw Error . error ( result ) ; } break ; case ScriptReaderBase . SEQUENCE_STATEMENT : scr . getCurrentSequence ( ) . reset ( scr . getSequenceValue ( ) ) ; break ; case ScriptReaderBase . COMMIT_STATEMENT : current . commit ( false ) ; break ; case ScriptReaderBase . INSERT_STATEMENT : { current . beginAction ( null ) ; Object [ ] data = scr . getData ( ) ; scr . getCurrentTable ( ) . insertNoCheckFromLog ( current , data ) ; current . endAction ( Result . updateOneResult ) ; break ; } case ScriptReaderBase . DELETE_STATEMENT : { current . beginAction ( null ) ; Object [ ] data = scr . getData ( ) ; scr . getCurrentTable ( ) . deleteNoCheckFromLog ( current , data ) ; current . endAction ( Result . updateOneResult ) ; break ; } case ScriptReaderBase . 
SET_SCHEMA_STATEMENT : { current . setSchema ( scr . getCurrentSchema ( ) ) ; } } if ( current . isClosed ( ) ) { sessionMap . remove ( currentId ) ; } } } catch ( Throwable e ) { String message ; // catch out - of - memory errors and terminate if ( e instanceof EOFException ) { // end of file - normal end } else if ( e instanceof OutOfMemoryError ) { message = "out of memory processing " + logFilename + " line: " + scr . getLineNumber ( ) ; database . logger . appLog . logContext ( SimpleLog . LOG_ERROR , message ) ; throw Error . error ( ErrorCode . OUT_OF_MEMORY ) ; } else { // stop processing on bad log line message = logFilename + " line: " + scr . getLineNumber ( ) + " " + e . toString ( ) ; database . logger . appLog . logContext ( SimpleLog . LOG_ERROR , message ) ; } } finally { if ( scr != null ) { scr . close ( ) ; } database . getSessionManager ( ) . closeAllSessions ( ) ; database . setReferentialIntegrity ( true ) ; }
public class TypeInterestFactory { /** * Register a regex pattern to filter interest in certain Java types . * @ param sourceKey Identifier of who gave the pattern to us ( so that we can update it ) */ public static void registerInterest ( String sourceKey , String regex , String rewritePattern , List < TypeReferenceLocation > locations ) { } }
registerInterest ( sourceKey , regex , rewritePattern , locations . toArray ( new TypeReferenceLocation [ locations . size ( ) ] ) ) ;
public class ColumnPrinter { /** * Add a value to the first column with the given name * @ param columnName name of the column to add to * @ param value value to add */ void addValue ( String columnName , String value ) { } }
/* NOTE(review): delegates to the index-based overload. If columnName is not in
   columnNames, indexOf returns -1 -- presumably the overload tolerates that;
   verify against its implementation. */
addValue ( columnNames . indexOf ( columnName ) , value ) ;
public class CheckJSDoc { /** * Whether this node ' s JSDoc may apply to a function * < p > This has some false positive cases , to allow for patterns like goog . abstractMethod . */ private boolean isJSDocOnFunctionNode ( Node n , JSDocInfo info ) { } }
/* NOTE(review): token-based classification. Function-like definition tokens are
   accepted outright; GETELEM/GETPROP only when the receiver is a qualified
   name; assignments/declarations are accepted unless the RHS is a class and
   the JSDoc is not a constructor annotation (see inline TODO below). */
switch ( n . getToken ( ) ) { case FUNCTION : case GETTER_DEF : case SETTER_DEF : case MEMBER_FUNCTION_DEF : case STRING_KEY : case COMPUTED_PROP : case EXPORT : return true ; case GETELEM : case GETPROP : if ( n . getFirstChild ( ) . isQualifiedName ( ) ) { // assume qualified names may be function declarations return true ; } return false ; case VAR : case LET : case CONST : case ASSIGN : { Node lhs = n . getFirstChild ( ) ; Node rhs = NodeUtil . getRValueOfLValue ( lhs ) ; if ( rhs != null && isClass ( rhs ) && ! info . isConstructor ( ) ) { return false ; } // TODO ( b / 124081098 ) : Check that the RHS of the assignment is a // function . Note that it can be a FUNCTION node , but it can also be // a call to goog . abstractMethod , goog . functions . constant , etc . return true ; } default : return false ; }
public class RythmEngine { /** * static ThreadLocal < Integer > cceCounter = new ThreadLocal < Integer > ( ) ; */ private ITemplate getTemplate ( IDialect dialect , String template , Object ... args ) { } }
/* NOTE(review): resolves (and caches) a TemplateClass keyed by the template
   source; when type inference is enabled the cache key is extended with a
   params-derived uuid so differently-typed invocations do not collide. Empty
   template short-circuits to EmptyTemplate.INSTANCE. */
if ( S . empty ( template ) ) { return EmptyTemplate . INSTANCE ; } boolean typeInferenceEnabled = conf ( ) . typeInferenceEnabled ( ) ; if ( typeInferenceEnabled ) { ParamTypeInferencer . registerParams ( this , args ) ; } String key = template ; if ( typeInferenceEnabled ) { key += ParamTypeInferencer . uuid ( ) ; } TemplateClass tc = classes ( ) . getByTemplate ( key ) ; if ( null == tc ) { tc = new TemplateClass ( template , this , dialect ) ; } ITemplate t = tc . asTemplate ( this ) ; setRenderArgs ( t , args ) ; return t ;
public class CUtil { /** * This method is exposed so test classes can overload and test the * arguments without actually spawning the compiler */ public static int runCommand ( final CCTask task , final File workingDir , final String [ ] cmdline , final boolean newEnvironment , final Environment env ) throws BuildException { } }
/* NOTE(review): logs the command line at the task's configured level, then
   delegates to CommandExecution.runCommand. The large block comment below is
   the retired Ant Execute-based launcher, deliberately kept for reference.
   IOExceptions are wrapped into BuildException with the failing binary name. */
try { task . log ( Commandline . toString ( cmdline ) , task . getCommandLogLevel ( ) ) ; /* final Execute exe = new Execute ( new LogStreamHandler ( task , Project . MSG _ INFO , Project . MSG _ ERR ) ) ; if ( System . getProperty ( " os . name " ) . equals ( " OS / 390 " ) ) { exe . setVMLauncher ( false ) ; exe . setAntRun ( task . getProject ( ) ) ; exe . setCommandline ( cmdline ) ; exe . setWorkingDirectory ( workingDir ) ; if ( env ! = null ) { final String [ ] environment = env . getVariables ( ) ; if ( environment ! = null ) { for ( final String element : environment ) { task . log ( " Setting environment variable : " + element , Project . MSG _ VERBOSE ) ; exe . setEnvironment ( environment ) ; exe . setNewenvironment ( newEnvironment ) ; return exe . execute ( ) ; */ return CommandExecution . runCommand ( cmdline , workingDir , task ) ; } catch ( final java . io . IOException exc ) { throw new BuildException ( "Could not launch " + cmdline [ 0 ] + ": " + exc , task . getLocation ( ) ) ; }
public class RulesProfile { /** * Note : disabled rules are excluded . */ @ CheckForNull public ActiveRule getActiveRuleByConfigKey ( String repositoryKey , String configKey ) { } }
for ( ActiveRule activeRule : activeRules ) { if ( StringUtils . equals ( activeRule . getRepositoryKey ( ) , repositoryKey ) && StringUtils . equals ( activeRule . getConfigKey ( ) , configKey ) && activeRule . isEnabled ( ) ) { return activeRule ; } } return null ;
public class HalResource { /** * Removes one link for the given relation and index . * @ param relation Link relation * @ param index Array index * @ return HAL resource */ public HalResource removeLink ( String relation , int index ) { } }
/* Thin wrapper: link removal is just resource removal in the LINKS namespace. */
return removeResource ( HalResourceType . LINKS , relation , index ) ;
public class Compiler { /** * Fill any empty modules with a place holder file . It makes any cross module motion easier . */ private void fillEmptyModules ( Iterable < JSModule > modules ) { } }
for ( JSModule module : modules ) { if ( ! module . getName ( ) . equals ( JSModule . WEAK_MODULE_NAME ) && module . getInputs ( ) . isEmpty ( ) ) { CompilerInput input = new CompilerInput ( SourceFile . fromCode ( createFillFileName ( module . getName ( ) ) , "" ) ) ; input . setCompiler ( this ) ; module . add ( input ) ; } }
public class ItemViewHolder { /** * Gets the listener object that was passed into the Adapter through its constructor and cast * it to a given type . * @ param type the type of the listener * @ return the listener casted to the given type or null if not listener was set into the Adapter . */ @ Nullable public < P > P getListener ( Class < P > type ) { } }
if ( mListener != null ) { return type . cast ( mListener ) ; } return null ;
public class Manager { /** * Generates and dispatches an SDK - specific spoken announcement . * For backwards compatibility , we ' re constructing an event from scratch * using the appropriate event type . If your application only targets SDK * 16 + , you can just call View . announceForAccessibility ( CharSequence ) . * note : AccessibilityManager is only available from API lvl 4. * Adapted from https : / / http : / / eyes - free . googlecode . com / files / accessibility _ codelab _ demos _ v2 _ src . zip * via https : / / github . com / coreform / android - formidable - validation * @ param context * Used to get { @ link AccessibilityManager } * @ param text * The text to announce . */ public static void announceForAccessibilityCompat ( Context context , CharSequence text ) { } }
/* NOTE(review): no-op unless the AccessibilityManager exists and is enabled.
   Pre-SDK-16 devices only speak FOCUSED events; 16+ uses ANNOUNCEMENT. Class
   and package names are set because an event without them may be dropped. */
if ( Build . VERSION . SDK_INT >= 4 ) { AccessibilityManager accessibilityManager = null ; if ( null != context ) { accessibilityManager = ( AccessibilityManager ) context . getSystemService ( Context . ACCESSIBILITY_SERVICE ) ; } if ( null == accessibilityManager || ! accessibilityManager . isEnabled ( ) ) { return ; } // Prior to SDK 16 , announcements could only be made through FOCUSED // events . Jelly Bean ( SDK 16 ) added support for speaking text verbatim // using the ANNOUNCEMENT event type . final int eventType ; if ( Build . VERSION . SDK_INT < 16 ) { eventType = AccessibilityEvent . TYPE_VIEW_FOCUSED ; } else { eventType = AccessibilityEvent . TYPE_ANNOUNCEMENT ; } // Construct an accessibility event with the minimum recommended // attributes . An event without a class name or package may be dropped . final AccessibilityEvent event = AccessibilityEvent . obtain ( eventType ) ; event . getText ( ) . add ( text ) ; event . setClassName ( Manager . class . getName ( ) ) ; event . setPackageName ( context . getPackageName ( ) ) ; // Sends the event directly through the accessibility manager . If your // application only targets SDK 14 + , you should just call // getParent ( ) . requestSendAccessibilityEvent ( this , event ) ; accessibilityManager . sendAccessibilityEvent ( event ) ; }
public class WriteClass {

    /**
     * Takes the spaces out of the given SQL name.
     *
     * @param strName the name to clean; must not be null
     * @return the name with every space character removed
     */
    public String fixSQLName(String strName) {
        // The original re-scanned the whole string with indexOf for every
        // single space (O(n^2)) and rebuilt it via substring concatenation.
        // String.replace removes all occurrences in one pass with the same result.
        return strName.replace(" ", "");
    }
}
public class SocketController { /** * Create and connect socket . */ public void connectSocket ( ) { } }
synchronized ( lock ) { if ( isForegrounded ) { if ( socketConnection == null ) { SocketFactory factory = new SocketFactory ( socketURI , new SocketEventDispatcher ( listener , new Parser ( ) ) . setLogger ( log ) , log ) ; socketConnection = new SocketConnectionController ( new Handler ( Looper . getMainLooper ( ) ) , dataMgr , factory , listener , new RetryStrategy ( 60 , 60000 ) , log ) ; socketConnection . setProxy ( proxyURI ) ; socketConnection . connect ( ) ; } else { socketConnection . connect ( ) ; } socketConnection . setManageReconnection ( true ) ; } lock . notifyAll ( ) ; }
public class AmazonElastiCacheAsyncClient { /** * Simplified method form for invoking the DescribeCacheSecurityGroups operation with an AsyncHandler . * @ see # describeCacheSecurityGroupsAsync ( DescribeCacheSecurityGroupsRequest , com . amazonaws . handlers . AsyncHandler ) */ @ Override public java . util . concurrent . Future < DescribeCacheSecurityGroupsResult > describeCacheSecurityGroupsAsync ( com . amazonaws . handlers . AsyncHandler < DescribeCacheSecurityGroupsRequest , DescribeCacheSecurityGroupsResult > asyncHandler ) { } }
/* Convenience overload: delegates with a default (empty) request object. */
return describeCacheSecurityGroupsAsync ( new DescribeCacheSecurityGroupsRequest ( ) , asyncHandler ) ;
public class InfomationDialog { /** * Statische Methode um ein Dialogfenster mit der angegebener Nachricht zu erzeugen . * @ param owner * the owner * @ param message * the message * @ return das ergebnis */ public static String showInfoDialog ( final Frame owner , final String message ) { } }
/* NOTE(review): builds a modal information dialog with a single "OK" button,
   wires the dialog itself as the button's action listener, blocks in
   setVisible, then returns the recorded result. The suppressed
   "unlikely-arg-type" warning flags indexOf(String) on the button list --
   presumably the Button type equals-compares against its label; if not,
   index would be -1 and get(-1) would throw. Verify Button.equals. */
InfomationDialog mdialog ; String ok = "OK" ; mdialog = new InfomationDialog ( owner , "Information message" , message , ok ) ; @ SuppressWarnings ( "unlikely-arg-type" ) final int index = mdialog . getVButtons ( ) . indexOf ( ok ) ; final Button button = mdialog . getVButtons ( ) . get ( index ) ; button . addActionListener ( mdialog ) ; mdialog . setVisible ( true ) ; return mdialog . getResult ( ) ;
public class DFSClient { /** * Return the Disk status for current namespace */ public DiskStatus getNSDiskStatus ( ) throws IOException { } }
long rawNums [ ] = namenode . getStats ( ) ; // rawNums [ 6 ] should be capacityNamespaceUsed long dfsUsed = ( rawNums . length > 6 ) ? rawNums [ 6 ] : rawNums [ 1 ] ; return new DiskStatus ( rawNums [ 0 ] , dfsUsed , rawNums [ 2 ] ) ;
public class HyperionClient { /** * Create the proper exception for the error response * @ param response The http response * @ return The exception for the error response * @ throws IOException */ protected HyperionException readException ( Response response ) throws IOException { } }
/* NOTE(review): best-effort resolution chain -- (1) try to parse the body as
   ErrorResponse; (2) try to reflectively instantiate the server-declared
   exception type; (3) fall back to a generic HyperionException built from the
   parsed error or, failing that, from the raw HTTP status/message. Parse and
   reflection failures are intentionally swallowed to keep the fallback path. */
ErrorResponse errorResponse = null ; try { errorResponse = objectMapper . readValue ( response . body ( ) . byteStream ( ) , ErrorResponse . class ) ; } catch ( Exception ignore ) { } HyperionException resolvedException = null ; if ( errorResponse != null ) { try { Class exceptionClass = Class . forName ( errorResponse . getType ( ) ) ; resolvedException = ( HyperionException ) exceptionClass . getConstructor ( String . class ) . newInstance ( errorResponse . getMessage ( ) ) ; } catch ( Throwable ignore ) { } if ( resolvedException == null ) { resolvedException = new HyperionException ( errorResponse . getStatusCode ( ) , errorResponse . getMessage ( ) ) ; } resolvedException . setErrorDetails ( errorResponse . getErrorDetails ( ) ) ; resolvedException . setErrorTime ( errorResponse . getErrorTime ( ) ) ; resolvedException . setRequestId ( errorResponse . getRequestId ( ) ) ; } if ( resolvedException == null ) { resolvedException = new HyperionException ( response . code ( ) , response . message ( ) ) ; } return resolvedException ;
public class AsImpl { /** * write the { @ link PayloadData } to underlying { @ link AspImpl } . If the state of As is PENDING , the PayloadData is stored in * pending queue . * @ param message * @ throws IOException */ protected void write ( PayloadData message ) throws IOException { } }
/* NOTE(review): picks the peer or local FSM based on functionality/exchange
   type, then routes by AS state: ACTIVE selects an ASP from the SLS routing
   table (congested ASPs, congestion level > 1, are kept as a last-resort
   fallback); PENDING queues the message; any other state is an error. */
FSM fsm = null ; boolean isASPLocalFsm = true ; if ( this . functionality == Functionality . AS || ( this . functionality == Functionality . SGW && this . exchangeType == ExchangeType . DE ) || ( this . functionality == Functionality . IPSP && this . ipspType == IPSPType . CLIENT ) || ( this . functionality == Functionality . IPSP && this . ipspType == IPSPType . SERVER && this . exchangeType == ExchangeType . DE ) ) { fsm = this . peerFSM ; } else { fsm = this . localFSM ; isASPLocalFsm = false ; } int sls = message . getData ( ) . getSLS ( ) ; switch ( AsState . getState ( fsm . getState ( ) . getName ( ) ) ) { case ACTIVE : boolean aspFound = false ; // TODO : Algo to select correct ASP int aspIndex = ( sls & this . aspSlsMask ) ; aspIndex = ( aspIndex >> this . aspSlsShiftPlaces ) ; AspImpl aspCong = null ; for ( int i = 0 ; i < this . appServerProcs . size ( ) ; i ++ ) { AspImpl aspTemp = ( AspImpl ) this . appServerProcs . get ( this . slsVsAspTable [ aspIndex ++ ] ) ; FSM aspFsm = null ; if ( isASPLocalFsm ) { aspFsm = aspTemp . getLocalFSM ( ) ; } else { aspFsm = aspTemp . getPeerFSM ( ) ; } if ( AspState . getState ( aspFsm . getState ( ) . getName ( ) ) == AspState . ACTIVE ) { if ( aspTemp . getAspFactory ( ) . getAssociation ( ) . getCongestionLevel ( ) > 1 ) { aspCong = aspTemp ; } else { aspTemp . getAspFactory ( ) . write ( message ) ; aspFound = true ; if ( aspTrafficListener != null ) { try { aspTrafficListener . onAspMessage ( aspTemp . getName ( ) , message . getData ( ) . getData ( ) ) ; } catch ( Exception e ) { logger . error ( String . format ( "Error while calling aspTrafficListener=%s onAspMessage method for Asp=%s" , aspTrafficListener , aspTemp ) ) ; } } break ; } } } // for if ( ! aspFound ) { if ( aspCong != null ) { aspCong . getAspFactory ( ) . write ( message ) ; aspFound = true ; if ( aspTrafficListener != null ) { try { aspTrafficListener . onAspMessage ( aspCong . getName ( ) , message . getData ( ) . 
getData ( ) ) ; } catch ( Exception e ) { logger . error ( String . format ( "Error while calling aspTrafficListener=%s onAspMessage method for Asp=%s" , aspTrafficListener , aspCong ) ) ; } } } } if ( ! aspFound ) { // This should never happen . logger . error ( String . format ( "Tx : no ACTIVE Asp for message=%s" , message ) ) ; } break ; case PENDING : if ( logger . isInfoEnabled ( ) ) { logger . info ( String . format ( "Adding the PayloadData=%s to PendingQueue for AS=%s" , message . toString ( ) , this . name ) ) ; } this . penQueue . add ( message ) ; break ; default : throw new IOException ( String . format ( "As name=%s is not ACTIVE" , this . name ) ) ; }
public class LegacySynchronousBus { /** * Sends a message synchronously to all registered destinations ; message * handling code in the destinations will run in the same thread as the sender * object ' s . * @ see org . dihedron . patterns . bus . Bus # send ( java . lang . Object ) */ @ Override public Bus < M > send ( M message ) { } }
logger . trace ( "starting dispatching message '{}' to destinations" , message ) ; for ( Destination < M > destination : destinations ) { logger . trace ( "dispatching to destination {}" , destination ) ; destination . onMessage ( message ) ; } logger . trace ( "done dispatching message '{}' to destinations" , message ) ; return this ;
public class WSDirectorySocket { /** * { @ inheritDoc } */ @ Override public void onWebSocketText ( String text ) { } }
if ( LOGGER . isTraceEnabled ( ) ) { LOGGER . trace ( "WebSocket client received - " + text ) ; } ResponseDeserializer ds = null ; try { ds = WebSocketSerializer . getResponseDeserializer ( text ) ; clientConnection . onReceivedPesponse ( ds . deserializerResponseHeader ( ) , ds . deserializerResponse ( ) ) ; } catch ( IOException e ) { LOGGER . error ( "Parse response get exception" , e ) ; }
public class Tuple2 { /** * Apply attribute 1 as argument to a function and return a new tuple with the substituted argument . */ public final < U1 > Tuple2 < U1 , T2 > map1 ( Function < ? super T1 , ? extends U1 > function ) { } }
/* Maps the first component; the second component is carried over unchanged. */
return Tuple . tuple ( function . apply ( v1 ) , v2 ) ;
public class EventImpl { /** * check if send event * @ return */ private boolean isSendEvent ( ) throws DevFailed { } }
/* NOTE(review): sends when the trigger's check passes, or when checking is
   disabled altogether. doCheck() is evaluated twice when the first conjunct
   fails -- if doCheck() has side effects that double call is significant;
   confirm before simplifying this expression. */
boolean send = false ; if ( ( eventTrigger . doCheck ( ) && eventTrigger . isSendEvent ( ) ) || ! eventTrigger . doCheck ( ) ) { send = true ; } return send ;
public class TrainingsImpl { /** * Get the list of exports for a specific iteration . * @ param projectId The project id * @ param iterationId The iteration id * @ throws IllegalArgumentException thrown if parameters fail the validation * @ return the observable to the List & lt ; Export & gt ; object */ public Observable < ServiceResponse < List < Export > > > getExportsWithServiceResponseAsync ( UUID projectId , UUID iterationId ) { } }
/* NOTE(review): validates required parameters up front, then invokes the
   Retrofit service and adapts the raw Response into a ServiceResponse inside
   flatMap; delegate failures surface as an error Observable. */
if ( projectId == null ) { throw new IllegalArgumentException ( "Parameter projectId is required and cannot be null." ) ; } if ( iterationId == null ) { throw new IllegalArgumentException ( "Parameter iterationId is required and cannot be null." ) ; } if ( this . client . apiKey ( ) == null ) { throw new IllegalArgumentException ( "Parameter this.client.apiKey() is required and cannot be null." ) ; } return service . getExports ( projectId , iterationId , this . client . apiKey ( ) , this . client . acceptLanguage ( ) , this . client . userAgent ( ) ) . flatMap ( new Func1 < Response < ResponseBody > , Observable < ServiceResponse < List < Export > > > > ( ) { @ Override public Observable < ServiceResponse < List < Export > > > call ( Response < ResponseBody > response ) { try { ServiceResponse < List < Export > > clientResponse = getExportsDelegate ( response ) ; return Observable . just ( clientResponse ) ; } catch ( Throwable t ) { return Observable . error ( t ) ; } } } ) ;
public class StringParameterValue { /** * Exposes the name / value as an environment variable . */ @ Override public void buildEnvironment ( Run < ? , ? > build , EnvVars env ) { } }
/* Publishes the value under the literal name and under its upper-cased form;
   the latter is kept for consumers that predate release 1.345. */
env . put ( name , value ) ; env . put ( name . toUpperCase ( Locale . ENGLISH ) , value ) ; // backward compatibility pre 1.345
public class TagsApi { /** * Get a list of repository tags from a project , sorted by name in reverse alphabetical order . * < pre > < code > GitLab Endpoint : GET / projects / : id / repository / tags < / code > < / pre > * @ param projectIdOrPath id , path of the project , or a Project instance holding the project ID or path * @ param itemsPerPage the number of Project instances that will be fetched per page * @ return the list of tags for the specified project ID * @ throws GitLabApiException if any exception occurs */ public Pager < Tag > getTags ( Object projectIdOrPath , int itemsPerPage ) throws GitLabApiException { } }
/* Builds a lazy Pager over GET /projects/:id/repository/tags; no query
   parameters are passed (the null argument). */
return ( new Pager < Tag > ( this , Tag . class , itemsPerPage , null , "projects" , getProjectIdOrPath ( projectIdOrPath ) , "repository" , "tags" ) ) ;
public class Formats {

    /**
     * Joins the given list of strings using the given delimiter.
     *
     * @param list the strings to join
     * @param delim the delimiter inserted between consecutive items
     * @return the joined string; empty when the list is empty
     */
    public static String join(List<String> list, String delim) {
        final StringBuilder out = new StringBuilder();
        for (int i = 0; i < list.size(); i++) {
            if (i > 0) {
                out.append(delim);
            }
            out.append(list.get(i));
        }
        return out.toString();
    }
}
public class FunctionType { /** * Notice that " call " and " bind " have the same argument signature , except that all the arguments * of " bind " ( except the first ) are optional . */ private FunctionType getCallOrBindSignature ( boolean isCall ) { } }
/* NOTE(review): clones the parameter list, prepends an optional-nullable
   thisType parameter, then adjusts optionality: bind() marks every remaining
   argument optional; call() makes the thisType optional only when the first
   real argument is optional/varargs (sufficient per the invariant that
   optionality is suffix-closed). */
boolean isBind = ! isCall ; Builder builder = builder ( registry ) . withReturnType ( isCall ? getReturnType ( ) : getBindReturnType ( - 1 ) ) . withTemplateKeys ( getTemplateTypeMap ( ) . getTemplateKeys ( ) ) ; Node origParams = getParametersNode ( ) ; if ( origParams != null ) { Node params = origParams . cloneTree ( ) ; Node thisTypeNode = Node . newString ( Token . NAME , "thisType" ) ; thisTypeNode . setJSType ( registry . createOptionalNullableType ( getTypeOfThis ( ) ) ) ; params . addChildToFront ( thisTypeNode ) ; if ( isBind ) { // The arguments of bind ( ) are unique in that they are all // optional but not undefinable . for ( Node current = thisTypeNode . getNext ( ) ; current != null ; current = current . getNext ( ) ) { current . setOptionalArg ( true ) ; } } else if ( isCall ) { // The first argument of call ( ) is optional iff all the arguments // are optional . It ' s sufficient to check the first argument . Node firstArg = thisTypeNode . getNext ( ) ; if ( firstArg == null || firstArg . isOptionalArg ( ) || firstArg . isVarArgs ( ) ) { thisTypeNode . setOptionalArg ( true ) ; } } builder . withParamsNode ( params ) ; } return builder . build ( ) ;
public class Rescheduler {

    /**
     * (Re)arms the wake-up timer so it fires {@code delay} from now.
     *
     * <p>Must be called from the {@link #serializingExecutor} originally
     * passed in.
     *
     * @param delay how far from now the task should run
     * @param timeUnit the unit of {@code delay}
     */
    void reschedule(long delay, TimeUnit timeUnit) {
        long delayNanos = timeUnit.toNanos(delay);
        long newRunAtNanos = nanoTime() + delayNanos;
        enabled = true;
        // Only (re)schedule the underlying future when the new deadline is
        // earlier than the current one (the subtraction makes the comparison
        // overflow-safe) or when nothing has been scheduled yet. A *later*
        // deadline just updates runAtNanos — presumably FutureRunnable
        // re-checks it when it fires; confirm against that class.
        if (newRunAtNanos - runAtNanos < 0 || wakeUp == null) {
            if (wakeUp != null) {
                // Best-effort cancel; false = don't interrupt if running.
                wakeUp.cancel(false);
            }
            wakeUp = scheduler.schedule(new FutureRunnable(), delayNanos, TimeUnit.NANOSECONDS);
        }
        runAtNanos = newRunAtNanos;
    }
}
public class TableBodyBox { /** * Determine the minimal width of the column * @ param col the column index * @ return the minimal width of the column */ public int getMinimalColumnWidth ( int col ) { } }
int ret = 0 ; int r = 0 ; while ( r < getRowCount ( ) ) { TableCellBox cell = cells [ col ] [ r ] ; if ( cell != null ) { int min = cell . getMinimalWidth ( ) / cell . getColspan ( ) ; if ( min > ret ) ret = min ; r += cell . getRowspan ( ) ; } else r ++ ; } return ret ;
public class CPRuleAssetCategoryRelPersistenceImpl {

    /**
     * Returns the cp rule asset category rel with the primary key, or
     * <code>null</code> if it could not be found.
     *
     * @param primaryKey the primary key of the cp rule asset category rel
     * @return the cp rule asset category rel, or <code>null</code> if a cp
     *         rule asset category rel with the primary key could not be found
     */
    @Override
    public CPRuleAssetCategoryRel fetchByPrimaryKey(Serializable primaryKey) {
        // Consult the entity cache first; nullModel is the cached
        // "known-missing" marker so repeated misses skip the database.
        Serializable serializable = entityCache.getResult(
            CPRuleAssetCategoryRelModelImpl.ENTITY_CACHE_ENABLED,
            CPRuleAssetCategoryRelImpl.class, primaryKey);
        if (serializable == nullModel) {
            return null;
        }
        CPRuleAssetCategoryRel cpRuleAssetCategoryRel = (CPRuleAssetCategoryRel) serializable;
        if (cpRuleAssetCategoryRel == null) {
            // Cache miss: load from the database within a session.
            Session session = null;
            try {
                session = openSession();
                cpRuleAssetCategoryRel = (CPRuleAssetCategoryRel) session.get(
                    CPRuleAssetCategoryRelImpl.class, primaryKey);
                if (cpRuleAssetCategoryRel != null) {
                    // Found — populate the cache for subsequent lookups.
                    cacheResult(cpRuleAssetCategoryRel);
                }
                else {
                    // Not in the database — cache the miss via nullModel.
                    entityCache.putResult(
                        CPRuleAssetCategoryRelModelImpl.ENTITY_CACHE_ENABLED,
                        CPRuleAssetCategoryRelImpl.class, primaryKey, nullModel);
                }
            }
            catch (Exception e) {
                // Drop any stale cache entry before rethrowing.
                entityCache.removeResult(
                    CPRuleAssetCategoryRelModelImpl.ENTITY_CACHE_ENABLED,
                    CPRuleAssetCategoryRelImpl.class, primaryKey);
                throw processException(e);
            }
            finally {
                closeSession(session);
            }
        }
        return cpRuleAssetCategoryRel;
    }
}
public class Proxy {

    /**
     * Sets the host of the proxy. The instance is treated as immutable:
     * either this same instance is returned (when the host is effectively
     * unchanged) or a new Proxy with the new host is created.
     *
     * @param host The host of the proxy, may be {@code null}.
     * @return The new proxy, never {@code null}.
     */
    public Proxy setHost(String host) {
        // Unchanged when the hosts are equal, or when the caller passes null
        // and the current host is already empty.
        // NOTE(review): this.host.equals(...) would NPE if this.host could
        // ever be null — presumably the constructor normalizes null to "";
        // verify against the constructor.
        if (this.host.equals(host) || (host == null && this.host.length() <= 0)) {
            return this;
        }
        return new Proxy(type, host, port, auth);
    }
}
public class Concat {

    /**
     * Concatenates every pending prefix with every prefix in {@code op},
     * moving values that reached full length into {@code done} and keeping
     * the still-partial ones for the next round.
     *
     * @param op the set of prefixes to append to each pending prefix
     * @return true when done, i.e. no partial prefixes remain
     */
    public boolean with(PrefixSet op) {
        PrefixSet next;
        long tmp;
        Prefix l;
        Prefix r;
        next = new PrefixSet();
        // Cross product: combine each pending ("todo") prefix with each
        // prefix of op.
        l = todo.iterator();
        while (l.step()) {
            r = op.iterator();
            while (r.step()) {
                tmp = Prefix.concat(l.data, r.data, k);
                if (tmp >= firstFullValue) {
                    // Reached full length — this value is complete.
                    done.add(tmp);
                } else {
                    // Still a prefix — process it again in the next round.
                    next.add(tmp);
                }
            }
        }
        todo = next;
        return todo.isEmpty();
    }
}
public class AbstractXMPPConnection { /** * Process interceptors . Interceptors may modify the stanza that is about to be sent . * Since the thread that requested to send the stanza will invoke all interceptors , it * is important that interceptors perform their work as soon as possible so that the * thread does not remain blocked for a long period . * @ param packet the stanza that is going to be sent to the server */ private void firePacketInterceptors ( Stanza packet ) { } }
List < StanzaListener > interceptorsToInvoke = new LinkedList < > ( ) ; synchronized ( interceptors ) { for ( InterceptorWrapper interceptorWrapper : interceptors . values ( ) ) { if ( interceptorWrapper . filterMatches ( packet ) ) { interceptorsToInvoke . add ( interceptorWrapper . getInterceptor ( ) ) ; } } } for ( StanzaListener interceptor : interceptorsToInvoke ) { try { interceptor . processStanza ( packet ) ; } catch ( Exception e ) { LOGGER . log ( Level . SEVERE , "Packet interceptor threw exception" , e ) ; } }
public class JDOQueryFactory { /** * Create a new { @ link JDOQuery } instance with the given projection * @ param expr projection and source * @ param < T > * @ return select ( expr ) . from ( expr ) */ public < T > JDOQuery < T > selectFrom ( EntityPath < T > expr ) { } }
return select ( expr ) . from ( expr ) ;
public class ExecutionStrategy { /** * Builds the type info hierarchy for the current field * @ param executionContext the execution context in play * @ param parameters contains the parameters holding the fields to be executed and source object * @ param fieldDefinition the field definition to build type info for * @ param fieldContainer the field container * @ return a new type info */ protected ExecutionStepInfo createExecutionStepInfo ( ExecutionContext executionContext , ExecutionStrategyParameters parameters , GraphQLFieldDefinition fieldDefinition , GraphQLObjectType fieldContainer ) { } }
GraphQLOutputType fieldType = fieldDefinition . getType ( ) ; List < Argument > fieldArgs = parameters . getField ( ) . getArguments ( ) ; GraphQLCodeRegistry codeRegistry = executionContext . getGraphQLSchema ( ) . getCodeRegistry ( ) ; Map < String , Object > argumentValues = valuesResolver . getArgumentValues ( codeRegistry , fieldDefinition . getArguments ( ) , fieldArgs , executionContext . getVariables ( ) ) ; return newExecutionStepInfo ( ) . type ( fieldType ) . fieldDefinition ( fieldDefinition ) . fieldContainer ( fieldContainer ) . field ( parameters . getField ( ) ) . path ( parameters . getPath ( ) ) . parentInfo ( parameters . getExecutionStepInfo ( ) ) . arguments ( argumentValues ) . build ( ) ;
public class MessageBuilder { /** * Creates a FLUSH _ REQ message . * @ param body the data of the request . * @ return a protobuf message . */ public static Message buildFlushRequest ( ByteBuffer body ) { } }
ZabMessage . FlushRequest flushReq = ZabMessage . FlushRequest . newBuilder ( ) . setBody ( ByteString . copyFrom ( body ) ) . build ( ) ; return Message . newBuilder ( ) . setType ( MessageType . FLUSH_REQ ) . setFlushRequest ( flushReq ) . build ( ) ;
public class CapabilityRegistry {

    /**
     * Creates an updateable shadow copy of this capability registry; on
     * publish, the copy pushes all its changes back to the main registry.
     * This is used to create a context-local registry that only commits its
     * changes to the main registry on completion.
     *
     * @return writable registry
     */
    CapabilityRegistry createShadowCopy() {
        CapabilityRegistry result = new CapabilityRegistry(forServer, this);
        // Hold our read lock while populating the copy so the snapshot is
        // consistent; the copy's own write lock guards its structures while
        // they are being filled in.
        readLock.lock();
        try {
            try {
                result.writeLock.lock();
                copy(this, result);
            } finally {
                result.writeLock.unlock();
            }
        } finally {
            readLock.unlock();
        }
        return result;
    }
}
public class RequiresNew { /** * < p > If called outside a transaction context , the interceptor must begin a new * JTA transaction , the managed bean method execution must then continue * inside this transaction context , and the transaction must be completed by * the interceptor . < / p > * < p > If called inside a transaction context , the current transaction context must * be suspended , a new JTA transaction will begin , the managed bean method * execution must then continue inside this transaction context , the transaction * must be completed , and the previously suspended transaction must be resumed . < / p > */ @ AroundInvoke public Object requiresNew ( final InvocationContext context ) throws Exception { } }
return runUnderUOWManagingEnablement ( UOWSynchronizationRegistry . UOW_TYPE_GLOBAL_TRANSACTION , false , context , "REQUIRES_NEW" ) ;
public class CommerceTaxMethodUtil {

    /**
     * Returns the commerce tax method where groupId = &#63; and engineKey = &#63;,
     * or <code>null</code> if it could not be found, optionally using the
     * finder cache.
     *
     * @param groupId the group ID
     * @param engineKey the engine key
     * @param retrieveFromCache whether to retrieve from the finder cache
     * @return the matching commerce tax method, or <code>null</code> if a
     *         matching commerce tax method could not be found
     */
    public static CommerceTaxMethod fetchByG_E(long groupId, String engineKey,
        boolean retrieveFromCache) {
        // Thin static facade over the persistence bean.
        return getPersistence().fetchByG_E(groupId, engineKey, retrieveFromCache);
    }
}
public class HTTPBatchClientConnectionInterceptor { /** * Configures proxy if this is applicable to connection * @ throws FMSException */ public SSLConnectionSocketFactory prepareClientSSL ( ) { } }
try { String path = Config . getProperty ( Config . PROXY_KEYSTORE_PATH ) ; String pass = Config . getProperty ( Config . PROXY_KEYSTORE_PASSWORD ) ; KeyStore trustStore = null ; if ( path != null && pass != null ) { trustStore = KeyStore . getInstance ( KeyStore . getDefaultType ( ) ) ; FileInputStream instream = new FileInputStream ( new File ( path ) ) ; try { trustStore . load ( instream , pass . toCharArray ( ) ) ; } finally { instream . close ( ) ; } } SSLContext sslContext = SSLContexts . custom ( ) . loadTrustMaterial ( trustStore , new TrustSelfSignedStrategy ( ) ) . build ( ) ; String tlsVersion = Config . getProperty ( Config . TLS_VERSION ) ; SSLConnectionSocketFactory sslConnectionFactory = new SSLConnectionSocketFactory ( sslContext , new String [ ] { tlsVersion } , null , new NoopHostnameVerifier ( ) ) ; return sslConnectionFactory ; } catch ( Exception ex ) { LOG . error ( "couldn't create httpClient!! {}" , ex . getMessage ( ) , ex ) ; return null ; }
public class LettuceSets { /** * Creates a new { @ code HashSet } containing all elements from { @ code elements } . * @ param elements the elements that the set should contain , must not be { @ literal null } . * @ param < T > the element type * @ return a new { @ code HashSet } containing all elements from { @ code elements } . */ public static < T > Set < T > newHashSet ( Collection < ? extends T > elements ) { } }
LettuceAssert . notNull ( elements , "Collection must not be null" ) ; HashSet < T > set = new HashSet < > ( elements . size ( ) ) ; set . addAll ( elements ) ; return set ;
public class StringUtils {

    /**
     * Strips all trailing slashes from a string; returns the string unchanged
     * if it has no trailing slash. (Matches the previous
     * CharMatcher.trimTrailingFrom behavior, which removed every trailing
     * '/', not just one.)
     *
     * @param s the string to strip, may be null
     * @return {@code s} without trailing slashes, or null if {@code s} is null
     */
    public static String stripTrailingSlash(String s) {
        if (s == null) {
            return null;
        }
        int end = s.length();
        while (end > 0 && s.charAt(end - 1) == '/') {
            end--;
        }
        // substring(0, length()) returns the same instance, so the
        // no-trailing-slash case allocates nothing.
        return s.substring(0, end);
    }
}
public class CacheOnDisk {

    /**
     * Writes a dependency id with its collection of cache ids to disk.
     *
     * @param id dependency id.
     * @param vs a collection of cache ids.
     * @return the HTOD return code; HTODDynacache.DISK_EXCEPTION indicates a
     *         disk failure (error handling is triggered before returning)
     */
    public int writeDependency(Object id, ValueSet vs) {
        // SKS-O
        int returnCode = htod.writeDependency(id, vs);
        if (returnCode == HTODDynacache.DISK_EXCEPTION) {
            // Disk failure — run the error path with the recorded exception.
            stopOnError(this.htod.diskCacheException);
        }
        return returnCode;
    }
}
public class LockCache {

    /**
     * If the size of the cache exceeds the soft limit and no other thread is
     * evicting entries, evict entries until the cache is back under the soft
     * limit.
     */
    private void evictIfOverLimit() {
        int numToEvict = mCache.size() - mSoftLimit;
        if (numToEvict <= 0) {
            return;
        }
        // tryLock: at most one evictor at a time; losing threads just return.
        if (mEvictLock.tryLock()) {
            try {
                // update the total number to evict while we are holding the lock
                numToEvict = mCache.size() - mSoftLimit;
                // This thread won the race as the evictor
                while (numToEvict > 0) {
                    // Wrap the sweep around when the iterator is exhausted.
                    if (!mIterator.hasNext()) {
                        mIterator = mCache.entrySet().iterator();
                    }
                    Map.Entry<K, ValNode> candidateMapEntry = mIterator.next();
                    ValNode candidate = candidateMapEntry.getValue();
                    if (candidate.mIsAccessed) {
                        // Second chance: clear the accessed bit and move on.
                        candidate.mIsAccessed = false;
                    } else {
                        // Evict only if unreferenced; the CAS to MIN_VALUE
                        // atomically marks the node evicted so later
                        // reference attempts on it fail.
                        if (candidate.mRefCount.compareAndSet(0, Integer.MIN_VALUE)) {
                            // the value object can be evicted, at the same time we make refCount minValue
                            mIterator.remove();
                            numToEvict--;
                        }
                    }
                }
            } finally {
                mEvictLock.unlock();
            }
        }
    }
}
public class TokenFelligiSunter { /** * Explain how the distance was computed . * In the output , the tokens in S and T are listed , and the * common tokens are marked with an asterisk . */ public String explainScore ( StringWrapper s , StringWrapper t ) { } }
BagOfTokens sBag = ( BagOfTokens ) s ; BagOfTokens tBag = ( BagOfTokens ) t ; StringBuffer buf = new StringBuffer ( "" ) ; PrintfFormat fmt = new PrintfFormat ( "%.3f" ) ; buf . append ( "Common tokens: " ) ; for ( Iterator i = sBag . tokenIterator ( ) ; i . hasNext ( ) ; ) { Token tok = ( Token ) i . next ( ) ; if ( tBag . contains ( tok ) ) { buf . append ( " " + tok . getValue ( ) + ": " ) ; buf . append ( fmt . sprintf ( tBag . getWeight ( tok ) ) ) ; } } buf . append ( "\nscore = " + score ( s , t ) ) ; return buf . toString ( ) ;
public class EnvironmentPlatformMarshaller {

    /**
     * Marshalls the given EnvironmentPlatform's fields (platform, languages)
     * through the supplied protocol marshaller.
     *
     * @param environmentPlatform the object to marshall; must not be null
     * @param protocolMarshaller the marshaller that receives each field
     */
    public void marshall(EnvironmentPlatform environmentPlatform,
        ProtocolMarshaller protocolMarshaller) {
        if (environmentPlatform == null) {
            throw new SdkClientException("Invalid argument passed to marshall(...)");
        }
        try {
            protocolMarshaller.marshall(environmentPlatform.getPlatform(), PLATFORM_BINDING);
            protocolMarshaller.marshall(environmentPlatform.getLanguages(), LANGUAGES_BINDING);
        } catch (Exception e) {
            // Wrap any marshalling failure, preserving the cause.
            throw new SdkClientException("Unable to marshall request to JSON: " + e.getMessage(), e);
        }
    }
}
public class DefaultSemanticHighlightingCalculator { /** * Highlights an object at the position of the given { @ link EStructuralFeature } */ protected void highlightFeature ( IHighlightedPositionAcceptor acceptor , EObject object , EStructuralFeature feature , String ... styleIds ) { } }
List < INode > children = NodeModelUtils . findNodesForFeature ( object , feature ) ; if ( children . size ( ) > 0 ) highlightNode ( acceptor , children . get ( 0 ) , styleIds ) ;
public class TextUtils {

    /**
     * Returns whether the given CharSequence consists solely of digit
     * characters. The empty sequence yields true.
     */
    public static boolean isDigitsOnly(CharSequence str) {
        // Character.isDigit(int) agrees with Character.isDigit(char) for
        // every UTF-16 code unit, so streaming code units is equivalent to
        // the index-based loop.
        return str.chars().allMatch(Character::isDigit);
    }
}
public class CompressionUtil { /** * Decode bz raw byte [ ] . * @ param data the data * @ return the byte [ ] */ public static byte [ ] decodeBZRaw ( byte [ ] data ) { } }
try { ByteArrayInputStream output = new ByteArrayInputStream ( data ) ; BZip2CompressorInputStream compresser = new BZip2CompressorInputStream ( output ) ; return IOUtils . toByteArray ( compresser ) ; } catch ( IOException e ) { throw new RuntimeException ( e ) ; }
public class CoreRemoteMongoCollectionImpl {

    /**
     * Finds a document in the collection and replaces it with the given document.
     *
     * @param filter the query filter
     * @param replacement the document to replace the matched document with
     * @param options A RemoteFindOneAndModifyOptions struct
     * @param resultClass the class to decode each document into
     * @param <ResultT> the target document type of the iterable.
     * @return the resulting document
     */
    public <ResultT> ResultT findOneAndReplace(final Bson filter, final Bson replacement,
        final RemoteFindOneAndModifyOptions options, final Class<ResultT> resultClass) {
        // Delegates to the shared find-one-and-modify operation, named for
        // the "findOneAndReplace" command, and executes it against the service.
        return operations.findOneAndModify("findOneAndReplace", filter, replacement,
            options, resultClass).execute(service);
    }
}
public class Histogram {

    /**
     * Construct a new histogram by decoding it from a ByteBuffer.
     *
     * @param buffer The buffer to decode from
     * @param minBarForHighestTrackableValue Force highestTrackableValue to be set at least this high
     * @return The newly constructed histogram
     */
    public static Histogram decodeFromByteBuffer(final ByteBuffer buffer,
        final long minBarForHighestTrackableValue) {
        // Delegates to the generic decoder, requesting this concrete class,
        // and narrows the result.
        return (Histogram) decodeFromByteBuffer(buffer, Histogram.class,
            minBarForHighestTrackableValue);
    }
}
public class BackendManager {

    /**
     * Calls the first registered request dispatcher that can handle the
     * request, converts its result to JSON and wraps it, together with the
     * request itself, into the response object.
     */
    private JSONObject callRequestDispatcher(JmxRequest pJmxReq)
            throws InstanceNotFoundException, AttributeNotFoundException, ReflectionException,
                   MBeanException, IOException, NotChangedException {
        Object retValue = null;
        boolean useValueWithPath = false;
        boolean found = false;
        // First dispatcher that claims the request wins.
        for (RequestDispatcher dispatcher : requestDispatchers) {
            if (dispatcher.canHandle(pJmxReq)) {
                retValue = dispatcher.dispatchRequest(pJmxReq);
                useValueWithPath = dispatcher.useReturnValueWithPath(pJmxReq);
                found = true;
                break;
            }
        }
        if (!found) {
            throw new IllegalStateException("Internal error: No dispatcher found for handling " + pJmxReq);
        }
        JsonConvertOptions opts = getJsonConvertOptions(pJmxReq);
        // Convert the raw return value to JSON, optionally drilling into the
        // sub-value addressed by the request's path parts.
        Object jsonResult = converters.getToJsonConverter().convertToJson(
            retValue, useValueWithPath ? pJmxReq.getPathParts() : null, opts);
        JSONObject jsonObject = new JSONObject();
        jsonObject.put("value", jsonResult);
        jsonObject.put("request", pJmxReq.toJSON());
        return jsonObject;
    }
}
public class Listener { /** * Sets the position of the listener . */ public void setPosition ( float x , float y , float z ) { } }
if ( _px != x || _py != y || _pz != z ) { AL10 . alListener3f ( AL10 . AL_POSITION , _px = x , _py = y , _pz = z ) ; }
public class Settings {

    /**
     * Sets the Executor used by the SDK for non-AsyncTask background work.
     *
     * @param executor the Executor to use; must not be null.
     */
    public static void setExecutor(Executor executor) {
        Validate.notNull(executor, "executor");
        // Write the static field under the class-wide lock shared with the
        // other Settings accessors.
        synchronized (LOCK) {
            Settings.executor = executor;
        }
    }
}
public class Utils { /** * NULL safe addAll ( ) */ public static < T > List < T > addAll ( List < T > to , List < ? extends T > what ) { } }
List < T > data = safeList ( to ) ; if ( ! isEmpty ( what ) ) { data . addAll ( what ) ; } return data ;
public class LabeledStatement { /** * Returns label with specified name from the label list for * this labeled statement . Returns { @ code null } if there is no * label with that name in the list . */ public Label getLabelByName ( String name ) { } }
for ( Label label : labels ) { if ( name . equals ( label . getName ( ) ) ) { return label ; } } return null ;
public class MDDImpl {

    /**
     * Sets the YM size and, when notification is required, fires an EMF SET
     * notification carrying the old and new values.
     *
     * <!-- begin-user-doc -->
     * <!-- end-user-doc -->
     * @generated
     */
    public void setYmSize(Integer newYmSize) {
        Integer oldYmSize = ymSize;
        ymSize = newYmSize;
        if (eNotificationRequired())
            eNotify(new ENotificationImpl(this, Notification.SET,
                AfplibPackage.MDD__YM_SIZE, oldYmSize, ymSize));
    }
}
public class FeatureLaplacePyramid { /** * See if the best score is better than the local adjusted scores at this scale */ private boolean checkMax ( T image , double adj , double bestScore , int c_x , int c_y ) { } }
sparseLaplace . setImage ( image ) ; boolean isMax = true ; beginLoop : for ( int i = c_y - 1 ; i <= c_y + 1 ; i ++ ) { for ( int j = c_x - 1 ; j <= c_x + 1 ; j ++ ) { double value = adj * sparseLaplace . compute ( j , i ) ; if ( value >= bestScore ) { isMax = false ; break beginLoop ; } } } return isMax ;
public class ClassInfo { /** * Add annotation default values . ( Only called in the case of annotation class definitions , when the annotation * has default parameter values . ) * @ param paramNamesAndValues * the default param names and values , if this is an annotation */ void addAnnotationParamDefaultValues ( final AnnotationParameterValueList paramNamesAndValues ) { } }
if ( this . annotationDefaultParamValues == null ) { this . annotationDefaultParamValues = paramNamesAndValues ; } else { this . annotationDefaultParamValues . addAll ( paramNamesAndValues ) ; }
public class FormModelFactory { /** * Returns the child of the formModel with the given page name . * @ param formModel the parent model to get the child from * @ param childPageName the name of the child to retrieve * @ return null the child can not be found * @ throws IllegalArgumentException if childPageName or formModel are null */ public FormModel getChild ( HierarchicalFormModel formModel , String childPageName ) { } }
if ( childPageName == null ) throw new IllegalArgumentException ( "childPageName == null" ) ; if ( formModel == null ) throw new IllegalArgumentException ( "formModel == null" ) ; final FormModel [ ] children = formModel . getChildren ( ) ; if ( children == null ) return null ; for ( int i = 0 ; i < children . length ; i ++ ) { final FormModel child = children [ i ] ; if ( childPageName . equals ( child . getId ( ) ) ) return child ; } return null ;
public class BlobContainersInner {

    /**
     * Gets the existing immutability policy along with the corresponding ETag
     * in response headers and body.
     *
     * @param resourceGroupName The name of the resource group within the user's subscription. The name is case insensitive.
     * @param accountName The name of the storage account within the specified resource group. Storage account names must be between 3 and 24 characters in length and use numbers and lower-case letters only.
     * @param containerName The name of the blob container within the specified storage account. Blob container names must be between 3 and 63 characters in length and use numbers, lower-case letters and dash (-) only. Every dash (-) character must be immediately preceded and followed by a letter or number.
     * @param ifMatch The entity state (ETag) version of the immutability policy to update. A value of "*" can be used to apply the operation only if the immutability policy already exists. If omitted, this operation will always be applied.
     * @throws IllegalArgumentException thrown if parameters fail the validation
     * @throws CloudException thrown if the request is rejected by server
     * @throws RuntimeException all other wrapped checked exceptions if the request fails to be sent
     * @return the ImmutabilityPolicyInner object if successful.
     */
    public ImmutabilityPolicyInner getImmutabilityPolicy(String resourceGroupName,
        String accountName, String containerName, String ifMatch) {
        // Synchronous facade: blocks on the async service call and unwraps
        // the response body.
        return getImmutabilityPolicyWithServiceResponseAsync(resourceGroupName, accountName,
            containerName, ifMatch).toBlocking().single().body();
    }
}