signature
stringlengths
43
39.1k
implementation
stringlengths
0
450k
public class BaseSparseNDArrayCOO { /** * Create a DataBuffer for indices of given arrays of indices . * @ param indices * @ param shape * @ return */ protected static DataBuffer createIndiceBuffer ( long [ ] [ ] indices , long [ ] shape ) { } }
checkNotNull ( indices ) ; checkNotNull ( shape ) ; if ( indices . length == 0 ) { return Nd4j . getDataBufferFactory ( ) . createLong ( shape . length ) ; } if ( indices . length == shape . length ) { return Nd4j . createBuffer ( ArrayUtil . flattenF ( indices ) ) ; } return Nd4j . createBuffer ( ArrayUtil . flatten ( indices ) ) ;
public class FilePickerFragment { /** * Name is validated to be non - null , non - empty and not containing any * slashes . * @ param name The name of the folder the user wishes to create . */ @ Override public void onNewFolder ( @ NonNull final String name ) { } }
File folder = new File ( mCurrentPath , name ) ; if ( folder . mkdir ( ) ) { refresh ( folder ) ; } else { Toast . makeText ( getActivity ( ) , R . string . nnf_create_folder_error , Toast . LENGTH_SHORT ) . show ( ) ; }
public class InsightClient { /** * Perform an Advanced Insight Request with a number . * @ param number A single phone number that you need insight about in national or international format . * @ return A { @ link AdvancedInsightResponse } representing the response from the Nexmo Number Insight API . * @ throws IOException if a network error occurred contacting the Nexmo Nexmo Number Insight API . * @ throws NexmoClientException if there was a problem with the Nexmo request or response objects . */ public AdvancedInsightResponse getAdvancedNumberInsight ( String number ) throws IOException , NexmoClientException { } }
return getAdvancedNumberInsight ( AdvancedInsightRequest . withNumber ( number ) ) ;
public class OrganizationHandler { /** * Returns an Organization * @ param organizationId String * @ return DbOrganization */ public DbOrganization getOrganization ( final String organizationId ) { } }
final DbOrganization dbOrganization = repositoryHandler . getOrganization ( organizationId ) ; if ( dbOrganization == null ) { throw new WebApplicationException ( Response . status ( Response . Status . NOT_FOUND ) . entity ( "Organization " + organizationId + " does not exist." ) . build ( ) ) ; } return dbOrganization ;
public class LdapTemplate {
    /**
     * {@inheritDoc}
     */
    @Override
    public <T> List<T> findAll(Class<T> clazz) {
        // Delegate to the base-scoped overload: search from the empty (root)
        // name using the template's default scope, returning all attributes.
        return findAll(LdapUtils.emptyLdapName(),
                getDefaultSearchControls(defaultSearchScope, RETURN_OBJ_FLAG, ALL_ATTRIBUTES),
                clazz);
    }
}
public class WstxEventFactory {
    /**
     * Must override this method to use a more efficient StartElement
     * implementation.
     */
    @SuppressWarnings("unchecked")
    @Override
    protected StartElement createStartElement(QName name, Iterator<?> attr,
            Iterator<?> ns, NamespaceContext ctxt) {
        // Unchecked casts: by the StAX factory contract callers pass an
        // attribute iterator and a namespace iterator respectively.
        return SimpleStartElement.construct(mLocation, name,
                (Iterator<Attribute>) attr, (Iterator<Namespace>) ns, ctxt);
    }
}
public class VarSetBuilder { /** * Adds a variable set annotation if the given value is non - null */ public VarSetBuilder hasIf ( String name , Object value ) { } }
return value != null ? has ( name , value ) : this ;
public class LogoutRequestState { /** * { @ inheritDoc } */ public final void execute ( ) throws InternetSCSIException { } }
final ProtocolDataUnit protocolDataUnit = protocolDataUnitFactory . create ( true , true , OperationCode . LOGOUT_REQUEST , connection . getSetting ( OperationalTextKey . HEADER_DIGEST ) , connection . getSetting ( OperationalTextKey . DATA_DIGEST ) ) ; final LogoutRequestParser logoutRequest = ( LogoutRequestParser ) protocolDataUnit . getBasicHeaderSegment ( ) . getParser ( ) ; logoutRequest . setReasonCode ( reasonCode ) ; if ( reasonCode != LogoutReasonCode . CLOSE_SESSION ) { logoutRequest . setConnectionID ( connection . getConnectionID ( ) ) ; } connection . send ( protocolDataUnit ) ; connection . nextState ( new LogoutResponseState ( connection ) ) ; super . stateFollowing = true ; // return true ;
public class AmazonRedshiftClient { /** * Deletes a snapshot schedule . * @ param deleteSnapshotScheduleRequest * @ return Result of the DeleteSnapshotSchedule operation returned by the service . * @ throws InvalidClusterSnapshotScheduleStateException * The cluster snapshot schedule state is not valid . * @ throws SnapshotScheduleNotFoundException * We could not find the specified snapshot schedule . * @ sample AmazonRedshift . DeleteSnapshotSchedule * @ see < a href = " http : / / docs . aws . amazon . com / goto / WebAPI / redshift - 2012-12-01 / DeleteSnapshotSchedule " * target = " _ top " > AWS API Documentation < / a > */ @ Override public DeleteSnapshotScheduleResult deleteSnapshotSchedule ( DeleteSnapshotScheduleRequest request ) { } }
request = beforeClientExecution ( request ) ; return executeDeleteSnapshotSchedule ( request ) ;
public class OWLObjectMaxCardinalityImpl_CustomFieldSerializer {
    /**
     * Deserializes the content of the object from the
     * {@link com.google.gwt.user.client.rpc.SerializationStreamReader}.
     *
     * @param streamReader the {@link com.google.gwt.user.client.rpc.SerializationStreamReader} to read the
     *        object's content from
     * @param instance the object instance to deserialize
     * @throws com.google.gwt.user.client.rpc.SerializationException
     *         if the deserialization operation is not successful
     */
    @Override
    public void deserializeInstance(SerializationStreamReader streamReader,
            OWLObjectMaxCardinalityImpl instance) throws SerializationException {
        // Delegate to the static GWT-generated deserializer for this type.
        deserialize(streamReader, instance);
    }
}
public class Discovery {
    /**
     * List collections.
     *
     * Lists existing collections for the service instance.
     *
     * @param listCollectionsOptions the {@link ListCollectionsOptions} containing the options for the call
     * @return a {@link ServiceCall} with a response type of {@link ListCollectionsResponse}
     */
    public ServiceCall<ListCollectionsResponse> listCollections(ListCollectionsOptions listCollectionsOptions) {
        Validator.notNull(listCollectionsOptions, "listCollectionsOptions cannot be null");
        // GET v1/environments/{environment_id}/collections
        String[] pathSegments = { "v1/environments", "collections" };
        String[] pathParameters = { listCollectionsOptions.environmentId() };
        RequestBuilder builder = RequestBuilder.get(
                RequestBuilder.constructHttpUrl(getEndPoint(), pathSegments, pathParameters));
        builder.query("version", versionDate);
        // Attach SDK analytics headers for this operation.
        Map<String, String> sdkHeaders = SdkCommon.getSdkHeaders("discovery", "v1", "listCollections");
        for (Entry<String, String> header : sdkHeaders.entrySet()) {
            builder.header(header.getKey(), header.getValue());
        }
        builder.header("Accept", "application/json");
        // Optional name filter.
        if (listCollectionsOptions.name() != null) {
            builder.query("name", listCollectionsOptions.name());
        }
        return createServiceCall(builder.build(),
                ResponseConverterUtils.getObject(ListCollectionsResponse.class));
    }
}
public class DefaultGroovyMethods { /** * Returns the first non - null closure result found by passing each map entry to the closure , otherwise the defaultResult is returned . * If the closure takes two parameters , the entry key and value are passed . * If the closure takes one parameter , the Map . Entry object is passed . * < pre class = " groovyTestCase " > * assert " Found b : 3 " = = [ a : 1 , b : 3 ] . findResult ( " default " ) { if ( it . value = = 3 ) return " Found $ { it . key } : $ { it . value } " } * assert " default " = = [ a : 1 , b : 3 ] . findResult ( " default " ) { if ( it . value = = 9 ) return " Found $ { it . key } : $ { it . value } " } * assert " Found a : 1 " = = [ a : 1 , b : 3 ] . findResult ( " default " ) { k , v - > if ( k . size ( ) + v = = 2 ) return " Found $ k : $ v " } * < / pre > * @ param self a Map * @ param defaultResult an Object that should be returned if all closure results are null * @ param closure a 1 or 2 arg Closure that returns a non - null value when processing should stop and a value should be returned * @ return the first non - null result collected by calling the closure , or the defaultResult if no such result was found * @ since 1.7.5 */ public static < T , U extends T , V extends T , A , B > T findResult ( Map < A , B > self , U defaultResult , @ ClosureParams ( MapEntryOrKeyValue . class ) Closure < V > closure ) { } }
T result = findResult ( self , closure ) ; if ( result == null ) return defaultResult ; return result ;
public class PrimaryBackupServerContext {
    /**
     * Handles a close request.
     *
     * @param request the close request to handle
     * @return a future completed with the service's close response
     */
    private CompletableFuture<CloseResponse> close(CloseRequest request) {
        // Resolve the owning service asynchronously, then forward the close
        // request to it and propagate its response.
        return getService(request).thenCompose(service -> service.close(request));
    }
}
public class JPAPUnitInfo {
    /**
     * Returns the EntityManager pool for the given component and
     * persistence-unit reference, creating and caching it on first use.
     * d510184
     *
     * @param j2eeName   component identity; combined with refName as the cache key
     * @param refName    persistence-unit reference name
     * @param properties properties passed to a newly created pool
     * @return the cached or newly created pool (never null)
     */
    JPAEMPool getEntityManagerPool(J2EEName j2eeName, String refName, Map<?, ?> properties) {
        JPAEMPool emPool = null;
        // One pool per component + ref-name pair.
        String poolKey = j2eeName.toString() + "#" + refName;
        // Lock the map so concurrent callers don't create duplicate pools.
        synchronized (ivEMPoolMap) {
            emPool = ivEMPoolMap.get(poolKey);
            if (emPool == null) {
                EntityManagerFactory emf = getEntityManagerFactory(j2eeName);
                emPool = new JPAEMPool(emf, properties, ivEMPoolCapacity, this, getJPAComponent()); // d638095.1, d743325
                ivEMPoolMap.put(poolKey, emPool);
            }
        }
        if (TraceComponent.isAnyTracingEnabled() && tc.isDebugEnabled())
            Tr.debug(tc, "getEntityManagerPool : " + poolKey + " : " + emPool);
        return emPool;
    }
}
public class JShell { /** * Returns the active import snippets . * This convenience method is equivalent to { @ code snippets ( ) } filtered for * { @ link jdk . jshell . Snippet . Status # isActive ( ) status ( snippet ) . isActive ( ) } * { @ code & & snippet . kind ( ) = = Kind . IMPORT } * and cast to ImportSnippet . * @ return the active declared import declarations . */ public Stream < ImportSnippet > imports ( ) { } }
return snippets ( ) . filter ( sn -> status ( sn ) . isActive ( ) && sn . kind ( ) == Snippet . Kind . IMPORT ) . map ( sn -> ( ImportSnippet ) sn ) ;
public class PackageDocImpl { /** * Get included enum types in this package . * @ return included enum types in this package . */ public ClassDoc [ ] enums ( ) { } }
ListBuffer < ClassDocImpl > ret = new ListBuffer < ClassDocImpl > ( ) ; for ( ClassDocImpl c : getClasses ( true ) ) { if ( c . isEnum ( ) ) { ret . append ( c ) ; } } return ret . toArray ( new ClassDocImpl [ ret . length ( ) ] ) ;
public class CompositeArtifactStore { /** * { @ inheritDoc } */ public long getMetadataLastModified ( String path ) throws IOException , MetadataNotFoundException { } }
boolean found = false ; long lastModified = 0 ; for ( int i = 0 ; i < stores . length ; i ++ ) { try { if ( ! found ) { lastModified = stores [ i ] . getMetadataLastModified ( path ) ; found = true ; } else { lastModified = Math . max ( lastModified , stores [ i ] . getMetadataLastModified ( path ) ) ; } } catch ( MetadataNotFoundException e ) { // ignore } } if ( ! found ) { throw new MetadataNotFoundException ( path ) ; } return lastModified ;
public class RoleAssignmentsInner {
    /**
     * Get the specified role assignment.
     *
     * @param scope The scope of the role assignment.
     * @param roleAssignmentName The name of the role assignment to get.
     * @param serviceCallback the async ServiceCallback to handle successful and failed responses.
     * @throws IllegalArgumentException thrown if parameters fail the validation
     * @return the {@link ServiceFuture} object
     */
    public ServiceFuture<RoleAssignmentInner> getAsync(String scope, String roleAssignmentName,
            final ServiceCallback<RoleAssignmentInner> serviceCallback) {
        // Bridge the observable-based overload into a ServiceFuture, wiring
        // the user's callback for completion notification.
        return ServiceFuture.fromResponse(
                getWithServiceResponseAsync(scope, roleAssignmentName), serviceCallback);
    }
}
public class SanitizedContent { /** * Creates a SanitizedContent object with default direction . */ static SanitizedContent create ( String content , ContentKind kind ) { } }
checkArgument ( kind != ContentKind . TEXT , "Use UnsanitizedString for SanitizedContent with a kind of TEXT" ) ; if ( Flags . stringIsNotSanitizedContent ( ) ) { return new SanitizedContent ( content , kind , kind . getDefaultDir ( ) ) ; } return SanitizedCompatString . create ( content , kind , kind . getDefaultDir ( ) ) ;
public class FatJarBuilder { /** * builds war with classes inside * @ param classPathEntries class path entries as ArtifactSpec or URLs * @ return the war file */ private File buildWar ( List < ArtifactOrFile > classPathEntries ) { } }
try { List < String > classesUrls = classPathEntries . stream ( ) . map ( ArtifactOrFile :: file ) . filter ( this :: isDirectory ) . filter ( url -> url . contains ( "classes" ) ) . collect ( Collectors . toList ( ) ) ; List < File > classpathJars = classPathEntries . stream ( ) . map ( ArtifactOrFile :: file ) . filter ( file -> file . endsWith ( ".jar" ) ) . map ( File :: new ) . collect ( Collectors . toList ( ) ) ; return WarBuilder . build ( classesUrls , classpathJars ) ; } catch ( IOException e ) { throw new RuntimeException ( "failed to build war" , e ) ; }
public class TransformationAction {
    /**
     * Applies the instrumentation.
     *
     * @param root The root folder that contains all class files.
     * @param classPath An iterable over all class path elements.
     * @throws IOException If an I/O exception occurs.
     */
    @SuppressWarnings("unchecked")
    private void apply(File root, Iterable<? extends File> classPath) throws IOException {
        if (!root.isDirectory()) {
            throw new GradleException("Not a directory: " + root);
        }
        ClassLoaderResolver classLoaderResolver = new ClassLoaderResolver();
        try {
            // Resolve every configured transformation into a plugin factory,
            // injecting the root folder and loggers as constructor arguments.
            List<Plugin.Factory> factories = new ArrayList<Plugin.Factory>(byteBuddyExtension.getTransformations().size());
            for (Transformation transformation : byteBuddyExtension.getTransformations()) {
                String plugin = transformation.getPlugin();
                try {
                    factories.add(new Plugin.Factory.UsingReflection((Class<? extends Plugin>) Class.forName(plugin,
                            false,
                            classLoaderResolver.resolve(transformation.getClassPath(root, classPath))))
                            .with(transformation.makeArgumentResolvers())
                            .with(Plugin.Factory.UsingReflection.ArgumentResolver.ForType.of(File.class, root),
                                    Plugin.Factory.UsingReflection.ArgumentResolver.ForType.of(Logger.class, project.getLogger()),
                                    Plugin.Factory.UsingReflection.ArgumentResolver.ForType.of(BuildLogger.class, new GradleBuildLogger(project.getLogger()))));
                    project.getLogger().info("Resolved plugin: {}", transformation.getRawPlugin());
                } catch (Throwable throwable) {
                    throw new GradleException("Cannot resolve plugin: " + transformation.getRawPlugin(), throwable);
                }
            }
            EntryPoint entryPoint = byteBuddyExtension.getInitialization().getEntryPoint(classLoaderResolver, root, classPath);
            project.getLogger().info("Resolved entry point: {}", entryPoint);
            // Build one compound locator over all class path elements: jars are
            // opened as jar files, directories as folder locators.
            List<ClassFileLocator> classFileLocators = new ArrayList<ClassFileLocator>();
            for (File artifact : classPath) {
                classFileLocators.add(artifact.isFile()
                        ? ClassFileLocator.ForJarFile.of(artifact)
                        : new ClassFileLocator.ForFolder(artifact));
            }
            ClassFileLocator classFileLocator = new ClassFileLocator.Compound(classFileLocators);
            Plugin.Engine.Summary summary;
            try {
                project.getLogger().info("Processing class files located in in: {}", root);
                Plugin.Engine pluginEngine;
                try {
                    // Pick the target class file version from the Gradle Java
                    // convention when present; otherwise fall back to this VM.
                    ClassFileVersion classFileVersion;
                    JavaPluginConvention convention = (JavaPluginConvention) project.getConvention().getPlugins().get("java");
                    if (convention == null) {
                        classFileVersion = ClassFileVersion.ofThisVm();
                        project.getLogger().warn("Could not locate Java target version, build is JDK dependant: {}", classFileVersion.getMajorVersion());
                    } else {
                        classFileVersion = ClassFileVersion.ofJavaVersion(Integer.parseInt(convention.getTargetCompatibility().getMajorVersion()));
                        project.getLogger().debug("Java version detected: {}", convention.getTargetCompatibility().getMajorVersion());
                    }
                    pluginEngine = Plugin.Engine.Default.of(entryPoint, classFileVersion, byteBuddyExtension.getMethodNameTransformer());
                } catch (Throwable throwable) {
                    throw new GradleException("Cannot create plugin engine", throwable);
                }
                try {
                    // Transform the folder in place, with error handling driven
                    // by the extension's fail-fast / live-initializer settings.
                    summary = pluginEngine
                            .with(byteBuddyExtension.isExtendedParsing()
                                    ? Plugin.Engine.PoolStrategy.Default.EXTENDED
                                    : Plugin.Engine.PoolStrategy.Default.FAST)
                            .with(classFileLocator)
                            .with(new TransformationLogger(project.getLogger()))
                            .withErrorHandlers(Plugin.Engine.ErrorHandler.Enforcing.ALL_TYPES_RESOLVED,
                                    byteBuddyExtension.isFailOnLiveInitializer()
                                            ? Plugin.Engine.ErrorHandler.Enforcing.NO_LIVE_INITIALIZERS
                                            : Plugin.Engine.Listener.NoOp.INSTANCE,
                                    byteBuddyExtension.isFailFast()
                                            ? Plugin.Engine.ErrorHandler.Failing.FAIL_FAST
                                            : Plugin.Engine.Listener.NoOp.INSTANCE)
                            .apply(new Plugin.Engine.Source.ForFolder(root), new Plugin.Engine.Target.ForFolder(root), factories);
                } catch (Throwable throwable) {
                    throw new GradleException("Failed to transform class files in " + root, throwable);
                }
            } finally {
                classFileLocator.close();
            }
            // Report the outcome: fail the build on failed transformations,
            // warn when nothing was transformed at all.
            if (!summary.getFailed().isEmpty()) {
                throw new GradleException(summary.getFailed() + " type transformations have failed");
            } else if (summary.getTransformed().isEmpty()) {
                project.getLogger().warn("No types were transformed during plugin execution");
            } else {
                project.getLogger().info("Transformed {} types", summary.getTransformed().size());
            }
        } finally {
            classLoaderResolver.close();
        }
    }
}
public class ProtocolConfig { /** * Sets a positive number indicating how many < code > unacked < / code > or < code > unordered < / code > packaged user - datas should be * queued maximally until they can be resolved . Defaults to { @ link ProtocolConfig # MAX _ PACKET _ QUEUE _ LIMIT } . * @ param packetQueueLimit the packet queue limit */ public void setPacketQueueLimit ( int packetQueueLimit ) { } }
packetQueueLimit = packetQueueLimit < MAX_PACKET_QUEUE_LIMIT ? packetQueueLimit : MAX_PACKET_QUEUE_LIMIT ; packetQueueLimit = packetQueueLimit >= 0 ? packetQueueLimit : 0 ; this . packetQueueLimit = packetQueueLimit ;
public class UserApi { /** * Get a list of currently authenticated user ' s SSH keys . * < pre > < code > GitLab Endpoint : GET / user / keys < / code > < / pre > * @ return a list of currently authenticated user ' s SSH keys * @ throws GitLabApiException if any exception occurs */ public List < SshKey > getSshKeys ( ) throws GitLabApiException { } }
Response response = get ( Response . Status . OK , getDefaultPerPageParam ( ) , "user" , "keys" ) ; return ( response . readEntity ( new GenericType < List < SshKey > > ( ) { } ) ) ;
public class InMemoryOutput { /** * Returns a list of lists where the outer list has one element for each * reduce shard , which is a list of the values emitted by that shard , in * order . */ @ Override public List < List < O > > finish ( Collection < ? extends OutputWriter < O > > writers ) { } }
ImmutableList . Builder < List < O > > out = ImmutableList . builder ( ) ; for ( OutputWriter < O > w : writers ) { InMemoryOutputWriter < O > writer = ( InMemoryOutputWriter < O > ) w ; out . add ( ImmutableList . copyOf ( writer . getResult ( ) ) ) ; } return out . build ( ) ;
public class Util {
    /**
     * Creates a Predicate that returns false if {@code source} contains an associated class
     * that is a strict subtype of the class associated with the tested T — i.e. an element
     * passes only when no other class in {@code source} is more specific than its own.
     * (The previous javadoc said "super type", which is the opposite of what the check does:
     * {@code inputClass.isAssignableFrom(match)} holds when inputClass is a supertype of match.)
     *
     * @param <T> the type to test
     * @param source the set of &lt;T&gt; to look for class matches.
     * @param toClass Function from T to Class
     * @return newly created predicate.
     */
    static <T> Predicate<T> createMostSpecificMatchPredicate(final Iterable<T> source,
            final Function<T, Class<?>> toClass) {
        return input -> {
            final Class<?> inputClass = toClass.apply(input);
            // Iterate the source directly instead of building a transformed
            // view (drops the Iterables.transform dependency, same semantics).
            for (final T candidate : source) {
                final Class<?> candidateClass = toClass.apply(candidate);
                // Reject when some source class is a strict subtype of the
                // tested class: a more specific match exists.
                if (!inputClass.equals(candidateClass) && inputClass.isAssignableFrom(candidateClass)) {
                    return false;
                }
            }
            return true;
        };
    }
}
public class Path { /** * Checks if the directory of this path is absolute . * @ return < code > true < / code > if the directory of this path is absolute , < code > false < / code > otherwise */ public boolean isAbsolute ( ) { } }
final int start = hasWindowsDrive ( uri . getPath ( ) , true ) ? 3 : 0 ; return uri . getPath ( ) . startsWith ( SEPARATOR , start ) ;
public class ElementFilter {
    /**
     * Returns a list of constructors in {@code elements}.
     *
     * @return a list of constructors in {@code elements}
     * @param elements the elements to filter
     */
    public static List<ExecutableElement> constructorsIn(Iterable<? extends Element> elements) {
        // Keep only elements whose kind is CONSTRUCTOR, cast to ExecutableElement.
        return listFilter(elements, CONSTRUCTOR_KIND, ExecutableElement.class);
    }
}
public class TreeRule { /** * Attempts to delete the specified rule from storage * @ param tsdb The TSDB to use for storage access * @ param tree _ id ID of the tree the rule belongs to * @ param level Level where the rule resides * @ param order Order where the rule resides * @ return A deferred without meaning . The response may be null and should * only be used to track completion . * @ throws HBaseException if there was an issue * @ throws IllegalArgumentException if the one of the required parameters was * missing */ public static Deferred < Object > deleteRule ( final TSDB tsdb , final int tree_id , final int level , final int order ) { } }
if ( tree_id < 1 || tree_id > 65535 ) { throw new IllegalArgumentException ( "Invalid Tree ID" ) ; } if ( level < 0 ) { throw new IllegalArgumentException ( "Invalid rule level" ) ; } if ( order < 0 ) { throw new IllegalArgumentException ( "Invalid rule order" ) ; } final DeleteRequest delete = new DeleteRequest ( tsdb . treeTable ( ) , Tree . idToBytes ( tree_id ) , Tree . TREE_FAMILY ( ) , getQualifier ( level , order ) ) ; return tsdb . getClient ( ) . delete ( delete ) ;
public class Tracer {
    /**
     * Pop CCM context.
     *
     * @param key The frame key
     * @param callstack The call stack
     */
    public static synchronized void popCCMContext(String key, Throwable callstack) {
        // Emit a structured trace event; the callstack is rendered to text
        // when present, otherwise an empty string is logged.
        log.tracef("%s", new TraceEvent("CachedConnectionManager", "NONE",
                TraceEvent.POP_CCM_CONTEXT, "NONE", key,
                callstack != null ? toString(callstack) : ""));
    }
}
public class MimeUtils { /** * Creates a new { @ link javax . activation . MimeType } from a string , but issues a * { @ link RuntimeException } instead of a checked { @ link MimeTypeParseException } * in case the string cannot be parsed . Useful when creating constants for which * we are certain that no exception should ensue . E . g . : * < code > * public class MyClass { * public final MimeType MY _ MIME = mimeType ( " application / my - mime " ) ; * < / code > * @ param type a MIME type as a { @ link String } ( e . g . " text / html " ) . * @ return the corresponding { @ link MimeType } . * @ throws java . lang . RuntimeException if the { @ link MimeType } constructor * throws { @ link MimeTypeParseException } . */ public static MimeType mimeType ( String type ) { } }
try { return new MimeType ( type ) ; } catch ( MimeTypeParseException ex ) { throw new RuntimeException ( ex ) ; }
public class VueGWTObserverManager {
    /**
     * Customize the VueObserver instance. We get in between to be warned whenever an object is
     * observed and observe it using our Java observers if necessary.
     *
     * @param vueObserver A {@link VueObserver}
     */
    public void customizeVueObserverPrototype(VueObserver vueObserver) {
        // Keep references to Vue's native observers so we can delegate to them.
        vueObserveArrayFunction = vueObserver.getObserveArray();
        vueWalkFunction = vueObserver.getWalk();
        // Intercept walk(): if the target is a Java object we observe it
        // ourselves and stop; otherwise fall through to Vue's original walk.
        vueObserver.setWalk(toObserve -> {
            if (observeJavaObject(toObserve)) {
                return;
            }
            vueWalkFunction.call(this, toObserve);
        });
    }
}
public class FileModeMapper {
    /**
     * Utility method to create a FileModeMapper for the given entry, and use it to map the file mode onto the given
     * file.
     *
     * @param entry the archive entry that holds the mode
     * @param file the file to apply the mode onto
     */
    public static void map(ArchiveEntry entry, File file) throws IOException {
        // Build the appropriate mapper for this entry, then apply its mode.
        create(entry).map(file);
    }
}
public class TFDictionary {
    /**
     * Entries sorted by frequency from high to low.
     *
     * @return a TreeSet of TermFrequency entries in reverse natural order
     */
    public TreeSet<TermFrequency> values() {
        TreeSet<TermFrequency> set = new TreeSet<TermFrequency>(Collections.reverseOrder());
        for (Map.Entry<String, TermFrequency> entry : entrySet()) {
            // NOTE(review): TreeSet discards elements that compare equal — if
            // two TermFrequency values can compareTo() == 0 (e.g. same
            // frequency), one is silently dropped. Confirm TermFrequency's
            // ordering breaks ties before relying on the returned size.
            set.add(entry.getValue());
        }
        return set;
    }
}
public class CloudDirectoryUtils { /** * Gets object ref by path . * @ param path the path * @ return the object ref by path */ public static ObjectReference getObjectRefByPath ( final String path ) { } }
if ( path == null ) { return null ; } return new ObjectReference ( ) . withSelector ( path ) ;
public class CouchDBSchemaManager {
    /**
     * Creates the view for max.
     *
     * @param views the views map to register the MAX view into (only if absent)
     */
    private void createViewForMax(Map<String, MapReduce> views) {
        // Install the view only once.
        if (views.get(CouchDBConstants.MAX) == null) {
            MapReduce mapr = new MapReduce();
            // Map: emit every numeric document field keyed by "<field>_<entity name>".
            mapr.setMap("function(doc){for(field in doc){var o = doc[field];if(typeof(o)==\"number\")emit(field+\"_\"+doc." + CouchDBConstants.ENTITYNAME + ", o);}}");
            // Reduce: numeric maximum over the emitted values.
            mapr.setReduce("function(keys, values){return Math.max.apply(Math, values);}");
            views.put(CouchDBConstants.MAX, mapr);
        }
    }
}
public class FileLogOutput { /** * Add some data from a LogRecord to the logBuffer if there is room . * @ param logRecord from which to write the next part . * @ param setMark true if fileMark is to be set . * @ param checkSpace true is we should check that log file space is available before * filling the buffer . * @ param flush true if we must force the last part of the logRecord to disk before we return * with the logSequenceNumber set . * @ return long the LogSequenceNumber that identifies the position in the log * or zero if the logRecord is not completely written . * or - 1 if the logBuffer was full , and nothing was written . * @ throws ObjectManagerException */ private long addBuffers ( LogRecord logRecord , boolean setMark , boolean checkSpace , boolean flush ) throws ObjectManagerException { } }
final String methodName = "addBuffers" ; if ( Tracing . isAnyTracingEnabled ( ) && trace . isEntryEnabled ( ) ) trace . entry ( this , cclass , methodName , new Object [ ] { logRecord , new Boolean ( setMark ) , new Boolean ( checkSpace ) , new Boolean ( flush ) } ) ; // Where we will put the data to be logged . int reservedAddress = 0 ; // The first and last pages in the logBuffer that we write into . int startPage = 0 ; int endPage = 0 ; // The LogBuffers we will start and end filling . LogBuffer startFillingLogBuffer ; LogBuffer endFillingLogBuffer ; // LogSequenceNumber for return ; long returnLogSequenceNumber = 0 ; // Find the number of bytes we will write this time . int totalBytes = logRecord . getBytesLeft ( ) ; // Decide if we need to split the log record into multiple parts . We do not allow long logRecords // to be written into the LogBuffer as a single piece because that would stop other log records // from being written if they used up the whole of the log buffer . Additionally this would also mean that // log records must be smaller than the log buffer in order to fit into it . boolean completed = false ; if ( totalBytes > maximumLogRecordPart ) { // set the length to be written this time . totalBytes = maximumLogRecordPart ; } else { completed = true ; } long startWriteMilliseconds ; if ( gatherStatistics ) { startWriteMilliseconds = System . currentTimeMillis ( ) ; } // if ( gatherStatistics ) . // Lock the logBuffer so that we can work out where to copy this logRecord . synchronized ( logBufferLock ) { if ( gatherStatistics ) { // Time how long we took to get the lock on the logBufferLock . long now = System . currentTimeMillis ( ) ; writeStalledMilliseconds += now - startWriteMilliseconds ; startWriteMilliseconds = now ; } // if ( gatherStatistics ) . // Set address for the copy . 
reservedAddress = nextFreeByteInLogBuffer ; // We never set nextFreeByteInLogBuffer to be the first byte , because this // is ultimately occupied by the sector bits copied from each sector . Hence there // is no possibility of having to step over the sector byte . // if ( nextFreeByteInLogBuffer % pageSize = = 0 ) reservedAddress + + ; // startPage = reservedAddress / pageSize ; startPage = lastPageFilling ; // Calculate the number of extra page boundaries we will cross each time we start a new page , // because we will need to leave a byte at the beginning of each one to put the sector bits in . // Work out pages started here rather than outside synchronize ( logBufferLock ) because we now know the // exact start address within the buffer . The - 1 is to allow for the sector byte used in the first page , // the pageSize - 1 is the number of data bytes we expect to fit into each page . int pagesStarted = ( totalBytes + partHeaderLength + ( reservedAddress % pageSize ) - 1 ) / ( pageSize - 1 ) ; // Move the start address on for the next log Record . This will leave us positioned // after byte zero , the sector byte but never on the sector byte . int updatedNextFreeByteInLogBuffer = reservedAddress + totalBytes + partHeaderLength + pagesStarted ; // See if we have wrapped around the end of the logBuffer . boolean wrappedLogBuffer = false ; // Capture the logBuffer where we will start to fill . startFillingLogBuffer = logBuffer ; // Capture the current last page we notified , this may move forward while we do the check on the space // in the LogBuffer , that is OK because we will fail to find the space and then come back here again . // This is updated by the NotifyHelper after it has cleared pageWritersStarted , so we must capture // it before we advance pageWritersStarted . The notify helper cannot move this past lastPageFilling // because we hold the logBufferLock and will not advance this before we release it . 
// We only fill up to the nextToLastPageNotified otherwise an empty logBuffer and a full logBuffer // both have firstPageFilling = = lastPageBNotified and so cannot be distinguished . int nextToLastPageNotified = lastPageNotified - 1 ; if ( nextToLastPageNotified < 0 ) nextToLastPageNotified = startFillingLogBuffer . numberOfPages - 1 ; if ( updatedNextFreeByteInLogBuffer >= startFillingLogBuffer . buffer . length ) { updatedNextFreeByteInLogBuffer = updatedNextFreeByteInLogBuffer - startFillingLogBuffer . buffer . length ; wrappedLogBuffer = true ; } // if ( updatedNextFreeByteInLogBuffer > = logBuffer . length ) . endPage = updatedNextFreeByteInLogBuffer / pageSize ; // Make sure there is enough room left in the logBuffer and that we have not run into the pages that // are still waiting to be written and notified . This prevents us from copying over over unwritten data . // Take the two cases . // Buffer Wrapped Fill < Notify Buffer not wrapped Fill > Notify // Fill Notify Notify Fill // I / / / / / / / / / / I I / / / / / I // The end page must not be in the unwritten hashed pages . We cannot run into the firstPageToNotify // by padding the logBuffer because we only pad if a single page is filling . We always leave a one // page gap between the last full page and the lastPaageNotified , hence startPage is never equal // to nextToLastPageNotified . // We cannot test fillingLogBuffer . pageFlushPending [ endPage ] because we may be using a different log buffer // to the one the flushHelper is using . // This test assumes that a single LogRecord part cannot wrap round the entire buffer . if ( ( ( startPage < nextToLastPageNotified ) && ( endPage < startPage || endPage >= nextToLastPageNotified ) ) || ( ( startPage > nextToLastPageNotified ) && ( endPage < startPage && endPage >= nextToLastPageNotified ) ) ) { if ( gatherStatistics ) totalNumberOfThreadsFindingFullLogBuffers ++ ; // Request a larger logBuffer . if ( newLogBufferPages == 0 && startFillingLogBuffer . 
numberOfPages < maximumLogBufferPages ) newLogBufferPages = Math . min ( startFillingLogBuffer . numberOfPages * 2 , maximumLogBufferPages ) ; // Go round again and see if the next attempt finds a logBuffer with enough space left . if ( Tracing . isAnyTracingEnabled ( ) && trace . isEntryEnabled ( ) ) trace . exit ( this , cclass , methodName , new Object [ ] { "logBuffer_full" , new Integer ( startPage ) , new Integer ( endPage ) , new Integer ( startFillingLogBuffer . pageWritersActive . get ( startPage ) ) , new Integer ( nextToLastPageNotified ) } ) ; return - 1 ; } // if ( ( lastPageFilling < localLastPageNotified ) . . . /* * Debug * / / If we stepped into a new page the we must be the first writer . * if ( logRecord . multiPartID ! = 0 ) { * if ( logRecord . atStart ( ) ) { * / / Preserve the start of any long active LogRecords . * multiPartFileStart [ logRecord . multiPartID ] = bufferFilePosition + reservedAddress ; * multiPartSectorByte [ logRecord . multiPartID ] = filePositionSectorByte ; * if ( multiPartFileStart [ logRecord . multiPartID ] > fileLogHeader . fileSize ) { * multiPartFileStart [ logRecord . multiPartID ] = multiPartFileStart [ logRecord . multiPartID ] - fileLogHeader . fileSize + FileLogHeader . headerLength * 2; * multiPartFileStart [ logRecord . multiPartID ] = ( multiPartFileStart [ logRecord . multiPartID ] = = 0 ? ( byte ) 1 : ( byte ) 0 ) ; * } / / if ( multiPartFileStart [ logRecord . multiPartID ] > fileLogHeader . fileSize ) . * / / Capture unchecked bytes up to the start of this LogRecord , * / / in case we have to truncate to this point . * multiPartUncheckedBytes [ logRecord . multiPartID ] = uncheckedBytes ; * } else if ( completed ) { * multiPartFileStart [ logRecord . multiPartID ] = - 1; * } / / if ( logRecord . atStart ( ) ) . * } / / if ( logRecord . multiPartID ! = 0 ) . * / / Debug */ // Were we asked to set the mark point ? if ( setMark ) { // Set Mark for first part only , later we truncate up to this point . 
if ( logRecord . atStart ( ) ) { fileMark = bufferFilePosition + reservedAddress ; fileMarkSectorByte = bufferSectorByte ; uncheckedBytesUpToMarkPoint = uncheckedBytes ; if ( fileMark > fileLogHeader . fileSize ) { fileMark = fileMark - fileLogHeader . fileSize + FileLogHeader . headerLength * 2 ; fileMarkSectorByte = ( fileMarkSectorByte == 0 ? ( byte ) 1 : ( byte ) 0 ) ; } // if ( fileMark > fileLogHeader . fileSize ) . if ( Tracing . isAnyTracingEnabled ( ) && trace . isDebugEnabled ( ) ) trace . debug ( this , cclass , methodName , new Object [ ] { "setMark" , new Long ( fileMark ) , new Byte ( fileMarkSectorByte ) , new Long ( bufferFilePosition ) , new Byte ( bufferSectorByte ) } ) ; } // if ( logRecord . atStart ( ) ) . // Include any long active log records . Find any log long Records that // ended after the Mark point or are still being written . Move the mark point // back to the earliest start . The actual start of the log file cannot be moved // while we do this calculation because we hold the fileMarkLock so we use that // as the reference point in the file to decide which LogRecord is the earliest // in the active part of the file . long fileMarkOffsetFromStart = offsetFromStart ( fileMark ) ; long smallestFileMarkOffsetFromStart = fileMarkOffsetFromStart ; for ( int i = 1 ; i < multiPartFileStart . length ; i ++ ) { if ( multiPartFileStart [ i ] >= 0 ) { // Still writing . long offsetFromStart = offsetFromStart ( multiPartFileStart [ i ] ) ; if ( offsetFromStart < smallestFileMarkOffsetFromStart ) { smallestFileMarkOffsetFromStart = offsetFromStart ; fileMark = multiPartFileStart [ i ] ; fileMarkSectorByte = multiPartSectorByte [ i ] ; uncheckedBytesUpToMarkPoint = multiPartUncheckedBytes [ logRecord . multiPartID ] ; } // if ( offsetFromStart < smallestFileMarkOffsetFromStart ) . } // if ( multiPartFileStart [ i ] > = 0 ) . } // for . . . if ( Tracing . isAnyTracingEnabled ( ) && trace . isDebugEnabled ( ) ) trace . 
debug ( this , cclass , methodName , new Object [ ] { "truncateMarkSet" , new Long ( fileMark ) , new Byte ( fileMarkSectorByte ) , new Long ( uncheckedBytesUpToMarkPoint ) } ) ; // Reset the unchecked bytes remaining after the mark point . uncheckedBytes = uncheckedBytes - uncheckedBytesUpToMarkPoint ; } // if ( setMark ) . if ( ! checkSpace ) { // Unchecked bytes are not returned to the available space when we truncate the log file as they are already // reserved and remain reserved after the truncate . uncheckedBytes = uncheckedBytes + totalBytes + partHeaderLength ; } // if ( ! checkSpace ) . if ( wrappedLogBuffer ) { startFillingLogBuffer . wrapped ( ) ; } // if ( wrappedLogBuffer ) . // Now capture the logBuffer we end filling , this may be the same as the start one if // we did not wrap or we did not change the logBuffer . endFillingLogBuffer = logBuffer ; if ( completed ) { returnLogSequenceNumber = endFillingLogBuffer . sequenceNumberOfFirstPage + endPage ; logSequenceNumber = returnLogSequenceNumber ; if ( flush ) endFillingLogBuffer . pageWaiterExists [ endPage ] = true ; } // if ( completed ) . // Count the threads who start writing in each page . startFillingLogBuffer . pageWritersActive . incrementAndGet ( startPage ) ; nextFreeByteInLogBuffer = updatedNextFreeByteInLogBuffer ; // lastPageFilling is not volatile but checked first in the attempt to move firstPageFilling // forward below . We have already incremented pageWritersStarted which is volatile and checked // second . lastPageFilling = endPage ; if ( Tracing . isAnyTracingEnabled ( ) && trace . isDebugEnabled ( ) ) trace . debug ( this , cclass , methodName , new Object [ ] { "logBuffer_reserved" , new Long ( endFillingLogBuffer . sequenceNumberOfFirstPage ) , new Integer ( firstPageFilling ) , new Integer ( lastPageFilling ) , new Integer ( reservedAddress ) , new Integer ( startPage ) , new Integer ( endPage ) , new Integer ( nextToLastPageNotified ) , new Integer ( startFillingLogBuffer . 
pageWritersActive . get ( startPage ) ) } ) ; } // synchronized ( logBufferLock ) . // Lock on the logBuffer is now released , filling the buffer takes place in parallel . // Calculation of BytesInLogBuffer above means we will already allowed for any leading byte of sector bits . // Add a header for the logRecord part followed by the logRecord data . addPart ( logRecord , startFillingLogBuffer . buffer , completed , reservedAddress , totalBytes ) ; if ( gatherStatistics ) { // Time how long we took to complete copying the logRecord . long now = System . currentTimeMillis ( ) ; writeCopyingMilliseconds += now - startWriteMilliseconds ; startWriteMilliseconds = now ; } // if ( gatherStatistics ) . // Mark our write as finished and restart the flushHelper if it has stalled . boolean startFlush = false ; for ( int iPage = startPage ; ; iPage ++ ) { if ( iPage == startFillingLogBuffer . numberOfPages ) { iPage = 0 ; startFillingLogBuffer = logBuffer ; } // if ( iPage = = fillingLogBuffer . numberOfPages ) . synchronized ( startFillingLogBuffer . pageStateLock [ iPage ] ) { // We can loop through the logBuffer multiple times if the flushHelper writes pages // while we are in this for loop , and enables writer threads to keep advancing the // lastPageFilling . On the first pass through the loop startFlush is false so we use // that to determine if we are on the startPage rather than checking if ( iPage = = startPage ) // as that would be true on each successive pass through the pages . if ( ! startFlush ) startFillingLogBuffer . pageWritersActive . decrementAndGet ( startPage ) ; // lastPageFilling may have no data in it as yet . It is never completely full either // as we would move to the next page in that case , so there is never any possibility of it being ready // to write without padding it . if ( iPage == lastPageFilling ) break ; // pageWritersStarted is no longer incrementing because iPage is positioned before lastPageFilling . if ( startFillingLogBuffer . 
pageWritersActive . get ( iPage ) == 0 && iPage == firstPageFilling ) { startFillingLogBuffer . pageFlushPending [ iPage ] = true ; // Advance firstPageFilling in a way that is always valid because it is captured in flush ( ) // above without holding a pageStateLock to protect it . if ( firstPageFilling + 1 == startFillingLogBuffer . numberOfPages ) firstPageFilling = 0 ; else firstPageFilling ++ ; startFlush = true ; } else { break ; } // if ( fillingLogBuffer . pageWriterStarted . . . } // synchronized ( fillingLogBuffer . pageStateLock [ i ] ) . } // for . . . if ( gatherStatistics ) { // Time how long we took to update the logBuffer state . long now = System . currentTimeMillis ( ) ; writeUpdateStateMilliseconds += now - startWriteMilliseconds ; startWriteMilliseconds = now ; } // if ( gatherStatistics ) . if ( Tracing . isAnyTracingEnabled ( ) && trace . isDebugEnabled ( ) ) trace . debug ( this , cclass , methodName , new Object [ ] { "firstPageFilling update done" , new Integer ( firstPageFilling ) , new Integer ( lastPageFilling ) , new Integer ( startPage ) , new Integer ( endPage ) , new Integer ( startFillingLogBuffer . pageWritersActive . get ( startPage ) ) , new Boolean ( startFillingLogBuffer . pageFlushPending [ startPage ] ) } ) ; if ( startFlush ) flushHelper . startFlush ( ) ; // See if there is anything to flush . if ( flush && completed ) { endFillingLogBuffer . waitForFlush ( endPage ) ; } if ( Tracing . isAnyTracingEnabled ( ) && trace . isEntryEnabled ( ) ) trace . exit ( this , cclass , methodName , new Object [ ] { new Long ( returnLogSequenceNumber ) } ) ; return returnLogSequenceNumber ;
public class EntityDataModelUtil {

    /**
     * Gets the OData type for a Java type and checks if the OData type is a structured type;
     * throws an exception if the OData type is not a structured type.
     *
     * @param entityDataModel The entity data model.
     * @param javaType The Java type.
     * @return The OData structured type for the Java type.
     * @throws ODataSystemException If there is no OData type for the specified Java type or if
     *         the OData type is not a structured type.
     */
    public static StructuredType getAndCheckStructuredType(EntityDataModel entityDataModel, Class<?> javaType) {
        // Resolve the Java type to its OData type (throws if unmapped), then verify the
        // resolved type is structured (throws otherwise). Both checks live in the helpers.
        return checkIsStructuredType(getAndCheckType(entityDataModel, javaType));
    }
}
public class RsaUtil {

    /**
     * Unwraps (decrypts) a wrapped private key using the given unwrapping key.
     *
     * @param key the wrapped private-key bytes; null or empty input yields null
     * @param unwrapKey the key used to perform the unwrap; null yields null
     * @return the recovered {@link PrivateKey}, or null if the input is invalid or the
     *         unwrap operation fails (failures are logged, not propagated)
     */
    public PrivateKey unwrapPrivateKeyByKey(byte[] key, Key unwrapKey) {
        try {
            // Reject null/empty wrapped bytes or a missing unwrap key up front.
            if (key == null || key.length <= 0 || unwrapKey == null) {
                return null;
            }
            Cipher cipher = Cipher.getInstance(algorithm);
            // Initialise the cipher in key-unwrap mode with the supplied key.
            cipher.init(Cipher.UNWRAP_MODE, unwrapKey);
            // NOTE(review): `algorithm` is passed both as the cipher transformation and as the
            // wrapped-key algorithm name; confirm these are meant to be the same value.
            return (PrivateKey) cipher.unwrap(key, algorithm, Cipher.PRIVATE_KEY);
        } catch (Exception e) {
            // Best-effort contract: log the failure and fall through to return null.
            log.error(e.getMessage(), e);
        }
        return null;
    }
}
public class MongoDBDialect { /** * do ' Map Reduce ' operation * @ param queryDescriptor descriptor of MongoDB map reduce query * @ param collection collection on which operation will be performed * @ return result iterator * @ see < a href = " https : / / docs . mongodb . com / manual / reference / method / db . collection . mapReduce / " > MapReduce < / a > */ private static ClosableIterator < Tuple > doMapReduce ( final MongoDBQueryDescriptor queryDescriptor , final MongoCollection < Document > collection ) { } }
MapReduceIterable < Document > mapReduceIterable = collection . mapReduce ( queryDescriptor . getMapFunction ( ) , queryDescriptor . getReduceFunction ( ) ) ; Document options = queryDescriptor . getOptions ( ) ; if ( options != null ) { Document query = ( Document ) options . get ( "query" ) ; Document sort = ( Document ) options . get ( "sort" ) ; Integer limit = options . getInteger ( "limit" ) ; String finalizeFunction = options . getString ( "finalize" ) ; Document scope = ( Document ) options . get ( "scope" ) ; Boolean jsMode = options . getBoolean ( "jsMode" ) ; Boolean verbose = options . getBoolean ( "verbose" ) ; Boolean bypassDocumentValidation = options . getBoolean ( "bypassDocumentValidation" ) ; Collation collation = getCollation ( ( Document ) options . get ( "collation" ) ) ; MapReduceAction mapReduceAction = null ; String collectionName = null ; String dbName = null ; Boolean sharded = null ; Boolean nonAtomic = null ; Object out ; if ( ( out = options . get ( "out" ) ) != null ) { if ( out instanceof String ) { collectionName = ( String ) out ; } else if ( out instanceof Document ) { Document outDocument = ( Document ) out ; if ( outDocument . containsKey ( "merge" ) ) { mapReduceAction = MapReduceAction . MERGE ; collectionName = outDocument . getString ( "merge" ) ; } else if ( outDocument . containsKey ( "replace" ) ) { mapReduceAction = MapReduceAction . REPLACE ; collectionName = outDocument . getString ( "replace" ) ; } else if ( ( ( Document ) out ) . containsKey ( "reduce" ) ) { mapReduceAction = MapReduceAction . REDUCE ; collectionName = outDocument . getString ( "reduce" ) ; } dbName = outDocument . getString ( "db" ) ; sharded = outDocument . getBoolean ( "sharded" ) ; nonAtomic = outDocument . getBoolean ( "nonAtomic" ) ; } } mapReduceIterable = ( query != null ) ? mapReduceIterable . filter ( query ) : mapReduceIterable ; mapReduceIterable = ( sort != null ) ? mapReduceIterable . 
sort ( sort ) : mapReduceIterable ; mapReduceIterable = ( limit != null ) ? mapReduceIterable . limit ( limit ) : mapReduceIterable ; mapReduceIterable = ( finalizeFunction != null ) ? mapReduceIterable . finalizeFunction ( finalizeFunction ) : mapReduceIterable ; mapReduceIterable = ( scope != null ) ? mapReduceIterable . scope ( scope ) : mapReduceIterable ; mapReduceIterable = ( jsMode != null ) ? mapReduceIterable . jsMode ( jsMode ) : mapReduceIterable ; mapReduceIterable = ( verbose != null ) ? mapReduceIterable . verbose ( verbose ) : mapReduceIterable ; mapReduceIterable = ( bypassDocumentValidation != null ) ? mapReduceIterable . bypassDocumentValidation ( bypassDocumentValidation ) : mapReduceIterable ; mapReduceIterable = ( collation != null ) ? mapReduceIterable . collation ( collation ) : mapReduceIterable ; mapReduceIterable = ( mapReduceAction != null ) ? mapReduceIterable . action ( mapReduceAction ) : mapReduceIterable ; mapReduceIterable = ( collectionName != null ) ? mapReduceIterable . collectionName ( collectionName ) : mapReduceIterable ; mapReduceIterable = ( dbName != null ) ? mapReduceIterable . databaseName ( dbName ) : mapReduceIterable ; mapReduceIterable = ( sharded != null ) ? mapReduceIterable . sharded ( sharded ) : mapReduceIterable ; mapReduceIterable = ( nonAtomic != null ) ? mapReduceIterable . nonAtomic ( nonAtomic ) : mapReduceIterable ; } MongoCursor < Document > cursor = mapReduceIterable . iterator ( ) ; Map < Object , Object > documents = new LinkedHashMap < > ( ) ; while ( cursor . hasNext ( ) ) { Document doc = cursor . next ( ) ; documents . put ( doc . get ( "_id" ) , doc . get ( "value" ) ) ; } MapTupleSnapshot snapshot = new MapTupleSnapshot ( Collections . < String , Object > singletonMap ( "n" , documents ) ) ; return CollectionHelper . newClosableIterator ( Collections . singletonList ( new Tuple ( snapshot , SnapshotType . UNKNOWN ) ) ) ;
public class Context { /** * Create a new context with the given key value set . The new context will cascade cancellation * from its parent . */ public < V1 , V2 > Context withValues ( Key < V1 > k1 , V1 v1 , Key < V2 > k2 , V2 v2 ) { } }
PersistentHashArrayMappedTrie < Key < ? > , Object > newKeyValueEntries = keyValueEntries . put ( k1 , v1 ) . put ( k2 , v2 ) ; return new Context ( this , newKeyValueEntries ) ;
public class DavUtil { /** * Add a namespace * @ param val * @ throws Throwable */ protected void addNs ( final XmlEmit xml , final String val ) throws Throwable { } }
if ( xml . getNameSpace ( val ) == null ) { xml . addNs ( new NameSpace ( val , null ) , false ) ; }
public class Ifc2x3tc1PackageImpl { /** * < ! - - begin - user - doc - - > * < ! - - end - user - doc - - > * @ generated */ public EClass getIfcEllipse ( ) { } }
if ( ifcEllipseEClass == null ) { ifcEllipseEClass = ( EClass ) EPackage . Registry . INSTANCE . getEPackage ( Ifc2x3tc1Package . eNS_URI ) . getEClassifiers ( ) . get ( 203 ) ; } return ifcEllipseEClass ;
public class OfficeReader {

    /**
     * Parses the input stream and generates an in-memory representation of the document.
     * In most cases (depending on the parser) the full document needs to be represented
     * in-memory.
     *
     * @throws org.zuinnote.hadoop.office.format.common.parser.FormatNotUnderstoodException
     *         in case an invalid format is detected
     */
    public void parse() throws FormatNotUnderstoodException {
        // Content detection: only Excel mime types are supported here.
        if (!this.hocr.getMimeType().contains(OfficeReader.FORMAT_EXCEL)) {
            throw new FormatNotUnderstoodException("Format not understood");
        }
        // Pick the parser implementation based on the footprint setting.
        if (this.hocr.getLowFootprint()) {
            LOG.info("Using low footprint API to parse Excel file");
            this.currentParser = new MSExcelLowFootprintParser(this.hocr, this.sheetsArray);
        } else {
            LOG.info("Using standard API to parse Excel file");
            this.currentParser = new MSExcelParser(this.hocr, this.sheetsArray);
        }
        // Parse the input stream with the chosen parser.
        currentParser.parse(this.in);
    }
}
public class TransmissionData { /** * Attempts to build a transmission into the specified buffer . The type of * transmission built will depend on the values passed into this object by * the reset ( ) method and also the setLayoutToXXX ( ) method . If there is * insufficient space in the supplied buffer , then multiple invocations of * this method my be required to incrementally build the transmission . * @ param xmitBuffer * @ return A boolean representing whether or not a complete transmission was built . */ boolean buildTransmission ( WsByteBuffer xmitBuffer ) { } }
if ( tc . isEntryEnabled ( ) ) SibTr . entry ( this , tc , "buildTransmission" , xmitBuffer ) ; SIErrorException error = null ; while ( ! exhausedXmitBuffer && ! transmissionBuilt && ( error == null ) ) { if ( tc . isDebugEnabled ( ) ) SibTr . debug ( this , tc , "state=" + state + " layout=" + layout ) ; switch ( state ) { case ( STATE_BUILDING_PRIMARY_HEADER ) : if ( buildHeader ( primaryHeaderFields , xmitBuffer ) ) { if ( layout == JFapChannelConstants . XMIT_PRIMARY_ONLY ) state = STATE_BUILDING_PAYLOAD ; else if ( layout == JFapChannelConstants . XMIT_CONVERSATION ) state = STATE_BUILDING_CONVERSATION_HEADER ; else if ( layout == JFapChannelConstants . XMIT_SEGMENT_START ) state = STATE_BUILDING_CONVERSATION_HEADER ; else if ( layout == JFapChannelConstants . XMIT_SEGMENT_MIDDLE ) state = STATE_BUILDING_CONVERSATION_HEADER ; else if ( layout == JFapChannelConstants . XMIT_SEGMENT_END ) state = STATE_BUILDING_CONVERSATION_HEADER ; else { if ( tc . isDebugEnabled ( ) ) SibTr . debug ( this , tc , "unexpected layout: " + layout + " in state: " + state ) ; state = STATE_ERROR ; error = new SIErrorException ( "unexpected layout: " + layout + " in state: " + state ) ; } } break ; case ( STATE_BUILDING_CONVERSATION_HEADER ) : if ( buildHeader ( conversationHeaderFields , xmitBuffer ) ) { if ( layout == JFapChannelConstants . XMIT_CONVERSATION ) state = STATE_BUILDING_PAYLOAD ; else if ( layout == JFapChannelConstants . XMIT_SEGMENT_START ) state = STATE_BUILDING_SEGMENT_HEADER ; else if ( layout == JFapChannelConstants . XMIT_SEGMENT_MIDDLE ) state = STATE_BUILDING_PAYLOAD ; else if ( layout == JFapChannelConstants . XMIT_SEGMENT_END ) state = STATE_BUILDING_PAYLOAD ; else { if ( tc . isDebugEnabled ( ) ) SibTr . 
debug ( this , tc , "unexpected layout: " + layout + " in state: " + state ) ; state = STATE_ERROR ; error = new SIErrorException ( "unexpected layout: " + layout + " in state: " + state ) ; } } break ; case ( STATE_BUILDING_SEGMENT_HEADER ) : if ( buildHeader ( segmentedTransmissionHeaderFields , xmitBuffer ) ) { if ( layout == JFapChannelConstants . XMIT_SEGMENT_START ) state = STATE_BUILDING_PAYLOAD ; else { if ( tc . isDebugEnabled ( ) ) SibTr . debug ( this , tc , "unexpected layout: " + layout + " in state: " + state ) ; state = STATE_ERROR ; error = new SIErrorException ( "unexpected layout: " + layout + " in state: " + state ) ; } } break ; case ( STATE_BUILDING_PAYLOAD ) : if ( buildPayload ( xmitBuffer ) ) { transmissionBuilt = true ; state = STATE_BUILDING_PRIMARY_HEADER ; currentXmitDataBufferIndex = 0 ; headerScratchSpace . clear ( ) ; } break ; case ( STATE_ERROR ) : if ( error == null ) error = new SIErrorException ( "Entered error state without exception been set" ) ; connection . invalidate ( true , error , "error building transmission" ) ; // D224570 break ; default : } } boolean retValue = transmissionBuilt ; if ( transmissionBuilt ) transmissionBuilt = false ; exhausedXmitBuffer = false ; if ( tc . isEntryEnabled ( ) ) SibTr . entry ( this , tc , "buildTransmission" , "" + retValue ) ; return retValue ;
public class tools { /** * Converts an array of bytes in a hex string ; Taken from * org . apache . commons . codec . binary . Hex . * @ param bytes array of bytes * @ return concatenated hex representation of input byte array */ public static String toHexString ( byte [ ] bytes ) { } }
final int l = bytes . length ; final char [ ] out = new char [ l << 1 ] ; for ( int i = 0 , j = 0 ; i < l ; i ++ ) { out [ j ++ ] = hexDigits [ ( 0xF0 & bytes [ i ] ) >>> 4 ] ; out [ j ++ ] = hexDigits [ 0x0F & bytes [ i ] ] ; } return new String ( out ) ;
public class CmsPropertyCustom {

    /**
     * Performs the edit properties action, will be called by the JSP page.<p>
     *
     * @param request the HttpServletRequest
     * @throws JspException if problems including sub-elements occur
     */
    @Override
    public void actionEdit(HttpServletRequest request) throws JspException {
        // Expose this initialized instance to included sub-elements via a request attribute.
        getJsp().getRequest().setAttribute(SESSION_WORKPLACE_CLASS, this);
        try {
            // Only persist changes when the resource is properly locked.
            if (isEditable()) {
                performEditOperation(request);
            }
        } catch (Throwable e) {
            // Error while defining the property: route to the error dialog page.
            includeErrorpage(this, e);
        }
    }
}
public class SamplingCollector {

    /*
     * (non-Javadoc)
     * @see com.oath.cyclops.react.collectors.lazy.LazyResultConsumer#withResults(java.util.Collection)
     */
    @Override
    public LazyResultConsumer<T> withResults(final Collection<FastFuture<T>> t) {
        // Rebuild this collector around a delegate consumer carrying the given results.
        return this.withConsumer(consumer.withResults(t));
    }
}
public class LogTemplates { /** * Produces a log template which logs something every N split . * @ param delegateLogger Concrete log template * @ param period N value , period * @ return Logger */ public static < C > LogTemplate < C > everyNSplits ( LogTemplate < C > delegateLogger , int period ) { } }
return new CounterLogTemplate < > ( delegateLogger , period ) ;
public class CommerceNotificationTemplatePersistenceImpl { /** * Returns the first commerce notification template in the ordered set where uuid = & # 63 ; . * @ param uuid the uuid * @ param orderByComparator the comparator to order the set by ( optionally < code > null < / code > ) * @ return the first matching commerce notification template * @ throws NoSuchNotificationTemplateException if a matching commerce notification template could not be found */ @ Override public CommerceNotificationTemplate findByUuid_First ( String uuid , OrderByComparator < CommerceNotificationTemplate > orderByComparator ) throws NoSuchNotificationTemplateException { } }
CommerceNotificationTemplate commerceNotificationTemplate = fetchByUuid_First ( uuid , orderByComparator ) ; if ( commerceNotificationTemplate != null ) { return commerceNotificationTemplate ; } StringBundler msg = new StringBundler ( 4 ) ; msg . append ( _NO_SUCH_ENTITY_WITH_KEY ) ; msg . append ( "uuid=" ) ; msg . append ( uuid ) ; msg . append ( "}" ) ; throw new NoSuchNotificationTemplateException ( msg . toString ( ) ) ;
public class DialogRootView { /** * Adds spacing to the view of a specific area . The spacing is added to the view ' s current * bottom padding . * @ param previousArea * The area , the view , the spacing should be applied to , corresponds to , as an instance * of the class { @ link Area } . The area may not be null * @ param previousView * The view , the spacing should be applied to , as an instance of the class { @ link View } . * The view may not be null * @ param area * The current area as a value of the enum { @ link Area } . The area may not be null * @ return A pair , which contains the top and bottom padding , which should be added to the * dialog ' s scroll view , as an instance of the class { @ link Pair } value */ @ NonNull private Pair < Integer , Integer > addViewSpacing ( @ NonNull final Area previousArea , @ NonNull final View previousView , @ NonNull final Area area ) { } }
int scrollViewPaddingTop = 0 ; int scrollViewMarginBottom = 0 ; int padding = - 1 ; if ( previousArea == Area . TITLE ) { padding = getResources ( ) . getDimensionPixelSize ( R . dimen . dialog_title_bottom_padding ) ; } else if ( previousArea == Area . MESSAGE ) { padding = getResources ( ) . getDimensionPixelSize ( R . dimen . dialog_message_bottom_padding ) ; } if ( previousArea != Area . HEADER && ! scrollableArea . isScrollable ( previousArea ) && scrollableArea . isScrollable ( area ) ) { int originalPadding = padding ; padding = originalPadding / 2 ; scrollViewPaddingTop = originalPadding - padding ; } else if ( area == Area . BUTTON_BAR && scrollableArea . isScrollable ( previousArea ) && ! scrollableArea . isScrollable ( area ) ) { int originalPadding = padding ; padding = originalPadding / 2 ; scrollViewMarginBottom = originalPadding - padding ; } if ( padding != - 1 ) { previousView . setPadding ( previousView . getPaddingLeft ( ) , previousView . getPaddingTop ( ) , previousView . getPaddingRight ( ) , previousView . getPaddingBottom ( ) + padding ) ; } return Pair . create ( scrollViewPaddingTop , scrollViewMarginBottom ) ;
public class Application { /** * Get the connection to the server for this applet . * Optionally create the server connection . * @ param localTaskOwner The task that will own this remote task ( or application ) server ) [ If null , get the app server ] . * @ param strUserID The user id ( or name ) to initialize the server ' s application to . * @ param bCreateIfNotFound If the server is null , initialize the server . * @ return The server object ( application defined ) . */ public RemoteTask getRemoteTask ( Task localTaskOwner , String strUserID , boolean bCreateIfNotFound ) { } }
return this . getRemoteTask ( localTaskOwner , strUserID , null , bCreateIfNotFound ) ;
public class StringUtilities {

    /**
     * Checks to see if a string entered is alphanumeric (every character is a letter or digit).
     *
     * Note: an empty string returns {@code true} (vacuously, as before); a {@code null} input
     * now returns {@code false} instead of throwing a {@link NullPointerException}.
     *
     * @param input The string to be tested
     * @return True if the string is alphanumeric, otherwise false
     */
    public static boolean isAlphanumeric(final String input) {
        // Robustness fix: null is not alphanumeric rather than an NPE.
        if (input == null) {
            return false;
        }
        // Equivalent to the original charAt loop; empty input is vacuously true.
        return input.chars().allMatch(Character::isLetterOrDigit);
    }
}
public class AbstractAWSStorageGatewayAsync { /** * Simplified method form for invoking the RemoveTagsFromResource operation with an AsyncHandler . * @ see # removeTagsFromResourceAsync ( RemoveTagsFromResourceRequest , com . amazonaws . handlers . AsyncHandler ) */ @ Override public java . util . concurrent . Future < RemoveTagsFromResourceResult > removeTagsFromResourceAsync ( com . amazonaws . handlers . AsyncHandler < RemoveTagsFromResourceRequest , RemoveTagsFromResourceResult > asyncHandler ) { } }
return removeTagsFromResourceAsync ( new RemoveTagsFromResourceRequest ( ) , asyncHandler ) ;
public class HillClimberWindowTinyLfuPolicy {

    /**
     * Promotes the entry to the protected region's MRU position, demoting an entry if
     * necessary.
     *
     * @param node the probation-resident node that was just hit
     */
    private void onProbationHit(Node node) {
        // Unlink the node from the probation queue before re-homing it.
        node.remove();
        // Re-tag the node as protected-resident.
        node.queue = PROTECTED;
        // Insert at the MRU (tail) end of the protected queue.
        node.appendToTail(headProtected);
        protectedSize++;
        // If the protected region is now over capacity, demote its LRU entry.
        demoteProtected();
    }
}
public class DatagramUtil {

    /**
     * Converts four consecutive bytes into a single int (big-endian order).
     *
     * @param data the source byte array
     * @param start index of the first of the four bytes
     * @return the int assembled from the four bytes
     */
    public static int convert(byte[] data, int start) {
        // Fold the four bytes in big-endian order, masking each to its unsigned value.
        int value = 0;
        for (int i = 0; i < 4; i++) {
            value = (value << 8) | (data[start + i] & 0xFF);
        }
        return value;
    }
}
public class JsonPath { /** * Creates a new JsonPath and applies it to the provided Json string * @ param json a json string * @ param jsonPath the json path * @ param filters filters to be applied to the filter place holders [ ? ] in the path * @ param < T > expected return type * @ return list of objects matched by the given path */ @ SuppressWarnings ( { } }
"unchecked" } ) public static < T > T read ( String json , String jsonPath , Predicate ... filters ) { return new ParseContextImpl ( ) . parse ( json ) . read ( jsonPath , filters ) ;
public class ArgTokenizer { /** * Consume the remainder of the input . This is useful to sure all options * have been encountered and to check to unexpected additional non - option * input . * @ return the string - separated concatenation of all remaining non - option * arguments . */ String remainder ( ) { } }
List < String > rem = new ArrayList < > ( ) ; while ( next ( ) != null ) { rem . add ( sval ) ; } return String . join ( " " , rem ) ;
public class StepExecution { /** * Returned values from the execution of the step . * @ param outputs * Returned values from the execution of the step . * @ return Returns a reference to this object so that method calls can be chained together . */ public StepExecution withOutputs ( java . util . Map < String , java . util . List < String > > outputs ) { } }
setOutputs ( outputs ) ; return this ;
public class Cluster { /** * A map of the cluster ' s subnets and their corresponding Availability Zones . * @ param subnetMapping * A map of the cluster ' s subnets and their corresponding Availability Zones . * @ return Returns a reference to this object so that method calls can be chained together . */ public Cluster withSubnetMapping ( java . util . Map < String , String > subnetMapping ) { } }
setSubnetMapping ( subnetMapping ) ; return this ;
public class DateUtility {
    /**
     * Converts an instance of <code>Date</code> into the canonical lexical
     * representation of an XSD dateTime with the following exceptions: dates
     * before 1 CE (i.e. 1 AD) are handled according to ISO 8601:2000 Second
     * Edition: "0000" is the lexical representation of 1 BCE, "-0001" is the
     * lexical representation of 2 BCE.
     *
     * @param date instance of java.util.Date
     * @return the lexical form of the XSD dateTime value, e.g.
     *         "2006-11-13T09:40:55.001Z", or null if {@code date} is null
     * @see <a href="http://www.w3.org/TR/xmlschema-2/#date-canonical-representation">3.2.7.2
     *      Canonical representation</a>
     */
    public static String convertDateToXSDString(Date date) {
        if (date == null) {
            return null;
        }
        String dateTime = convertDateToString(date, true);
        if (date.before(ONE_CE)) {
            // fix the format for lexical representation of the year
            // e.g. 1 BCE: 0000-01-01 (1 BCE is year 0)
            int pos = dateTime.indexOf('-', 1); // first '-' after the (possibly signed) year
            int year = Integer.parseInt(dateTime.substring(0, pos));
            if (year == -1) {
                // 1 BCE is written "0000" per ISO 8601:2000 2nd ed.
                dateTime = "0000" + dateTime.substring(pos);
            } else if (year < 0) {
                // Shift by one (n BCE -> year -(n-1)) and left-pad to 4 digits.
                year += 1;
                String prefix = "";
                if (year > -10) {
                    prefix = "000";
                } else if (year > -100) {
                    prefix = "00";
                } else if (year > -1000) {
                    prefix = "0";
                }
                dateTime = "-" + prefix + Math.abs(year) + dateTime.substring(pos);
            }
        }
        // fix the format for the lexical representation of the milliseconds:
        // no leading 0s are allowed, and if it's all zeros it has to be removed.
        int posDot = dateTime.indexOf('.');
        int posZ = dateTime.indexOf('Z');
        int millis = Integer.parseInt(dateTime.substring(posDot + 1, posZ));
        String milliString;
        if (millis == 0) {
            milliString = "";
        } else if (millis < 10) {
            milliString = ".00" + millis;
        } else if (millis < 100) {
            milliString = ".0" + millis;
        } else {
            milliString = "." + millis;
        }
        // Canonical form forbids trailing zeros in the fractional part.
        while (milliString.length() > 0 && milliString.charAt(milliString.length() - 1) == '0') {
            milliString = milliString.substring(0, milliString.length() - 1);
        }
        dateTime = dateTime.substring(0, posDot) + milliString + "Z";
        return dateTime;
    }
}
public class FineUploader5Session { /** * Any parameters you would like passed with the associated GET request to * your server . * @ param aParams * New parameters to be added . * @ return this */ @ Nonnull public FineUploader5Session addParams ( @ Nullable final Map < String , String > aParams ) { } }
m_aSessionParams . addAll ( aParams ) ; return this ;
public class ProvFactory { /** * A factory method to create an instance of a specialization { @ link SpecializationOf } * @ param specific an identifier ( { @ link QualifiedName } ) for the specific { @ link Entity } * @ param general an identifier ( { @ link QualifiedName } ) for the general { @ link Entity } * @ return an instance of { @ link SpecializationOf } */ @ Override public SpecializationOf newSpecializationOf ( QualifiedName specific , QualifiedName general ) { } }
SpecializationOf res = of . createSpecializationOf ( ) ; res . setSpecificEntity ( specific ) ; res . setGeneralEntity ( general ) ; return res ;
public class DeletingWhileIterating {
    /**
     * implements the visitor to setup the opcode stack, collectionGroups, groupToIterator and loops
     *
     * @param classContext the context object of the currently parsed class
     */
    @Override
    public void visitClassContext(ClassContext classContext) {
        // Bail out if the required classes were not resolvable (static init failure).
        if ((collectionClass == null) || (iteratorClass == null)) {
            return;
        }
        try {
            collectionGroups = new ArrayList<>();
            groupToIterator = new HashMap<>();
            loops = new HashMap<>(10);
            super.visitClassContext(classContext);
        } finally {
            // Release per-class state so it does not leak across visited classes.
            collectionGroups = null;
            groupToIterator = null;
            loops = null;
            endOfScopes = null;
        }
    }
}
public class Client { /** * { @ inheritDoc } */ public void setPermissions ( IConnection conn , Collection < String > permissions ) { } }
if ( permissions == null ) { conn . removeAttribute ( PERMISSIONS ) ; } else { conn . setAttribute ( PERMISSIONS , permissions ) ; }
public class ExecutionEntityImpl { /** * The current flow element , will be filled during operation execution */ public FlowElement getCurrentFlowElement ( ) { } }
if ( currentFlowElement == null ) { String processDefinitionId = getProcessDefinitionId ( ) ; if ( processDefinitionId != null ) { org . activiti . bpmn . model . Process process = ProcessDefinitionUtil . getProcess ( processDefinitionId ) ; currentFlowElement = process . getFlowElement ( getCurrentActivityId ( ) , true ) ; } } return currentFlowElement ;
public class ResultFactory {
    /**
     * Find a parser to handle the protocol response body based on the content type found in the response
     * and the expected result type specified by the user; if one or both fields is missing then
     * attempts to choose a sensible default.
     *
     * @param mediaType    the content type in the response, or null if none was given
     * @param expectedType the expected response type indicated by the user, or null
     * @return the parser of the chosen response format
     * @throws SparqlException if the chosen format cannot produce the expected result type
     */
    private static final ResultParser findParser(String mediaType, ResultType expectedType) {
        ResponseFormat format = null;
        // Prefer MIME type when choosing result format.
        if (mediaType != null) {
            mediaType = stripParams(mediaType); // drop ";charset=..." style parameters
            format = mimeFormats.get(mediaType);
            if (format == null) {
                logger.warn("Unrecognized media type ({}) in SPARQL server response", mediaType);
            } else {
                logger.debug("Using result format {} for media type {}", format, mediaType);
            }
        }
        // If MIME type was absent or unrecognized, choose default based on expected result type.
        if (format == null) {
            logger.debug("Unable to determine result format from media type");
            if (expectedType != null) {
                format = defaultTypeFormats.get(expectedType);
                logger.debug("Using default format {} for expected result type {}", format, expectedType);
            } else {
                format = DEFAULT_FORMAT;
                logger.debug("No expected type provided; using default format {}", format);
            }
        }
        assert format != null : "Could not determine result format";
        // Validate that the chosen format can produce the expected result type.
        if (expectedType != null && !format.resultTypes.contains(expectedType)) {
            throw new SparqlException("Result format " + format + " does not support expected result type " + expectedType);
        }
        return format.parser;
    }
}
public class ResponseTypeBuilder {
    /**
     * Maps a XACML decision to a status code. Parsing/processing error flags
     * take precedence over the decision value itself.
     *
     * @param decision the XACML decision to convert
     * @return the corresponding status code constant
     */
    private String getStatusCode(DecisionType decision) {
        if (fXACMLParsingError) {
            return STATUS_CODE_SYNTAX_ERROR;
        } else if (fProcessingError) {
            return STATUS_CODE_PROCESSING_ERROR;
        } else if (decision == DecisionType.Deny || decision == DecisionType.Permit) {
            // A definite decision (either way) means evaluation succeeded.
            return STATUS_CODE_OK;
        } else if (decision == DecisionType.Indeterminate || decision == DecisionType.Indeterminate_D
                || decision == DecisionType.Indeterminate_P || decision == DecisionType.Indeterminate_DP) {
            return STATUS_CODE_MISSING_ATTRIBUTE;
        } else {
            logger.debug("Unknown status code decision");
            return STATUS_CODE_PROCESSING_ERROR;
        }
    }
}
public class Worker { /** * Remove ourselves from parents worker threads and close our zk connection */ private void shutdown ( ) { } }
parent . workerThreads . get ( plan ) . remove ( this ) ; if ( zk != null ) { try { logger . debug ( "closing " + zk . getSessionId ( ) ) ; zk . close ( ) ; zk = null ; } catch ( InterruptedException e1 ) { logger . debug ( e1 ) ; } logger . debug ( "shutdown complete" ) ; }
public class DefaultPool {
    /**
     * {@inheritDoc}
     *
     * Destroys the managed connection behind the listener. Regardless of whether
     * destruction succeeds, the listener is marked DESTROYED and the pool permit
     * is released so the pool's capacity accounting stays consistent.
     */
    public void destroyConnectionListener(ConnectionListener cl) throws ResourceException {
        if (getInternalStatistics().isEnabled())
            getInternalStatistics().deltaDestroyedCount();
        try {
            cl.getManagedConnection().destroy();
        } catch (Exception e) {
            // Wrap and rethrow with the original cause preserved.
            throw new ResourceException(e);
        } finally {
            cl.setState(DESTROYED);
            semaphore.release(); // free the pool slot even on failure
        }
    }
}
public class TextRangeCalculator { /** * Calculates the beginLine of a violation report . * @ param pmdViolation The violation for which the beginLine should be calculated . * @ return The beginLine is assumed to be the line with the smallest number . However , if the smallest number is * out - of - range ( non - positive ) , it takes the other number . */ private static int calculateBeginLine ( RuleViolation pmdViolation ) { } }
int minLine = Math . min ( pmdViolation . getBeginLine ( ) , pmdViolation . getEndLine ( ) ) ; return minLine > 0 ? minLine : calculateEndLine ( pmdViolation ) ;
public class Graph {
    /**
     * Debug helper: prints every term chain in the graph, one chain per line,
     * as "name\tscore ," pairs.
     */
    public void printGraph() {
        for (Term term : terms) {
            if (term == null) {
                continue;
            }
            System.out.print(term.getName() + "\t" + term.score() + " ,");
            // Walk the linked successors; note this reuses (clobbers) the loop
            // variable, which is safe because the outer loop re-assigns it.
            while ((term = term.next()) != null) {
                System.out.print(term + "\t" + term.score() + " ,");
            }
            System.out.println();
        }
    }
}
public class SVGAndroidRenderer {
    /**
     * Renders a &lt;symbol&gt; element into the given viewport: applies the element's
     * style, clips to the viewport unless overflow is enabled, applies the viewBox
     * transform (or a plain translate when no viewBox is set), then renders children.
     *
     * @param obj      the symbol element to render
     * @param viewPort the target viewport; a zero-sized viewport disables rendering
     */
    private void render(SVG.Symbol obj, Box viewPort) {
        debug("Symbol render");
        // Per spec, a zero width or height viewport disables rendering of the element.
        if (viewPort.width == 0f || viewPort.height == 0f)
            return;
        // "If attribute 'preserveAspectRatio' is not specified, then the effect is as
        // if a value of xMidYMid meet were specified."
        PreserveAspectRatio positioning = (obj.preserveAspectRatio != null) ? obj.preserveAspectRatio : PreserveAspectRatio.LETTERBOX;
        updateStyleForElement(state, obj);
        state.viewPort = viewPort;
        if (!state.style.overflow) {
            setClipRect(state.viewPort.minX, state.viewPort.minY, state.viewPort.width, state.viewPort.height);
        }
        if (obj.viewBox != null) {
            canvas.concat(calculateViewBoxTransform(state.viewPort, obj.viewBox, positioning));
            state.viewBox = obj.viewBox;
        } else {
            // No viewBox: just position content at the viewport origin.
            canvas.translate(state.viewPort.minX, state.viewPort.minY);
        }
        boolean compositing = pushLayer();
        renderChildren(obj, true);
        if (compositing)
            popLayer(obj);
        updateParentBoundingBox(obj);
    }
}
public class LspGetq {
    /**
     * Compose LSP parameter from elementary LSP with previous LSP
     * (MA prediction): lsp[j] = lsp_ele[j]*fg_sum[j] + sum_k freq_prev[k][j]*fg[k][j].
     */
    public static void lsp_prev_compose(
            float lsp_ele[], /* (i) Q13 : LSP vectors */
            float lsp[], /* (o) Q13 : quantized LSP parameters */
            float fg[][], /* (i) Q15 : MA prediction coef. */
            float freq_prev[][], /* (i) Q13 : previous LSP vector */
            float fg_sum[] /* (i) Q15 : present MA prediction coef. */
    ) {
        int j, k;
        // For each of the M LSP coefficients, combine the present contribution
        // with the MA_NP past frames' predicted contributions.
        for (j = 0; j < LD8KConstants.M; j++) {
            lsp[j] = lsp_ele[j] * fg_sum[j];
            for (k = 0; k < LD8KConstants.MA_NP; k++)
                lsp[j] += freq_prev[k][j] * fg[k][j];
        }
        return;
    }
}
public class ns_ns_savedconfig {
    /**
     * Use this operation to get saved configuration from NetScaler Instance.
     *
     * @param client   the NITRO service used to issue the request
     * @param resource the resource describing what to fetch
     * @return the single saved-config resource returned by the API
     * @throws Exception if validation or the remote call fails
     */
    public static ns_ns_savedconfig get(nitro_service client, ns_ns_savedconfig resource) throws Exception {
        resource.validate("get");
        // The NITRO API returns an array; exactly one element is expected here.
        return ((ns_ns_savedconfig[]) resource.get_resources(client))[0];
    }
}
public class AbstractFieldStyler {
    /**
     * Create the PdfFormField that will be used to add a form field to the pdf.
     * Dispatches on the configured field type and casts the base field {@code bf}
     * to the concrete iText field class that can build that type.
     *
     * @return the created form field
     * @throws IOException
     * @throws DocumentException
     * @throws VectorPrintException if the field type is unknown / unsupported
     */
    @Override
    public PdfFormField makeField() throws IOException, DocumentException, VectorPrintException {
        switch (getFieldtype()) {
            // TEXT / COMBO / LIST all build from a TextField instance.
            case TEXT:
                return ((TextField) bf).getTextField();
            case COMBO:
                return ((TextField) bf).getComboField();
            case LIST:
                return ((TextField) bf).getListField();
            case BUTTON:
                return ((PushbuttonField) bf).getField();
            case CHECKBOX:
                return ((RadioCheckField) bf).getCheckField();
            case RADIO:
                return ((RadioCheckField) bf).getRadioField();
        }
        // Fell through the switch: report both the base field class and the type.
        throw new VectorPrintException(String.format("cannot create pdfformfield from %s and %s",
                (bf != null) ? bf.getClass() : null, String.valueOf(getFieldtype())));
    }
}
public class ESClient { /** * Sets the batch size . * @ param persistenceUnit * the persistence unit * @ param puProperties * the pu properties */ private void setBatchSize ( String persistenceUnit , Map < String , Object > puProperties ) { } }
String batch_Size = null ; if ( puProperties != null ) { Object externalBatchSize = puProperties . get ( PersistenceProperties . KUNDERA_BATCH_SIZE ) ; externalBatchSize = externalBatchSize != null ? externalBatchSize . toString ( ) : null ; batch_Size = puProperties != null ? ( String ) externalBatchSize : null ; if ( batch_Size != null ) { batchSize = Integer . valueOf ( batch_Size ) ; if ( batchSize == 0 ) { throw new IllegalArgumentException ( "kundera.batch.size property must be numeric and > 0." ) ; } } } else if ( batch_Size == null ) { PersistenceUnitMetadata puMetadata = KunderaMetadataManager . getPersistenceUnitMetadata ( kunderaMetadata , persistenceUnit ) ; batchSize = puMetadata != null ? puMetadata . getBatchSize ( ) : 0 ; }
public class DoCopy {
    /**
     * Copy a resource.
     *
     * @param transaction indicates that the method is within the scope of a WebDAV transaction
     * @param req         Servlet request
     * @param resp        Servlet response
     * @return true if the copy is successful
     * @throws WebdavException if an error in the underlying store occurs
     * @throws IOException when an error occurs while sending the response
     * @throws LockFailedException
     */
    public boolean copyResource(ITransaction transaction, HttpServletRequest req, HttpServletResponse resp)
            throws WebdavException, IOException, LockFailedException {
        // Parsing destination header
        String destinationPath = parseDestinationHeader(req, resp);
        if (destinationPath == null) {
            return false;
        }
        String path = getRelativePath(req);
        // Copying a resource onto itself is forbidden.
        if (path.equals(destinationPath)) {
            resp.sendError(WebdavStatus.SC_FORBIDDEN);
            return false;
        }
        Hashtable<String, Integer> errorList = new Hashtable<String, Integer>();
        String parentDestinationPath = getParentPath(getCleanPath(destinationPath));
        // Both the destination and its parent must be unlocked before copying.
        if (!isUnlocked(transaction, req, resourceLocks, parentDestinationPath)) {
            resp.setStatus(WebdavStatus.SC_LOCKED);
            return false; // parentDestination is locked
        }
        if (!isUnlocked(transaction, req, resourceLocks, destinationPath)) {
            resp.setStatus(WebdavStatus.SC_LOCKED);
            return false; // destination is locked
        }
        // Parsing overwrite header
        boolean overwrite = shouldOverwrite(req);
        // Overwriting the destination: take a temporary lock for the duration of the copy.
        String lockOwner = "copyResource" + System.currentTimeMillis() + req.toString();
        if (resourceLocks.lock(transaction, destinationPath, lockOwner, false, 0, TEMP_TIMEOUT, TEMPORARY)) {
            StoredObject copySo, destinationSo = null;
            try {
                copySo = store.getStoredObject(transaction, path);
                // Retrieve the resources
                if (copySo == null) {
                    resp.sendError(HttpServletResponse.SC_NOT_FOUND);
                    return false;
                }
                if (copySo.isNullResource()) {
                    // A lock-null resource cannot be copied; advertise what IS allowed.
                    String methodsAllowed = DeterminableMethod.determineMethodsAllowed(copySo);
                    resp.addHeader("Allow", methodsAllowed);
                    resp.sendError(WebdavStatus.SC_METHOD_NOT_ALLOWED);
                    return false;
                }
                errorList = new Hashtable<String, Integer>();
                destinationSo = store.getStoredObject(transaction, destinationPath);
                if (overwrite) {
                    // Delete destination resource, if it exists
                    if (destinationSo != null) {
                        doDelete.deleteResource(transaction, destinationPath, errorList, req, resp);
                    } else {
                        resp.setStatus(WebdavStatus.SC_CREATED);
                    }
                } else {
                    // If the destination exists, then it's a conflict
                    if (destinationSo != null) {
                        resp.sendError(WebdavStatus.SC_PRECONDITION_FAILED);
                        return false;
                    }
                    resp.setStatus(WebdavStatus.SC_CREATED);
                }
                copy(transaction, path, destinationPath, errorList, req, resp);
                // Partial failures are reported as a multi-status body.
                if (!errorList.isEmpty()) {
                    sendReport(req, resp, errorList);
                }
            } finally {
                // Always release the temporary lock taken above.
                resourceLocks.unlockTemporaryLockedObjects(transaction, destinationPath, lockOwner);
            }
        } else {
            resp.sendError(WebdavStatus.SC_INTERNAL_SERVER_ERROR);
            return false;
        }
        return true;
    }
}
public class FunctionType {
    /**
     * Computes the supremum or infimum of two functions. Because sup() and inf() share a lot of logic
     * for functions, we use a single helper.
     *
     * @param that       the other function type
     * @param leastSuper if true, compute the supremum of {@code this} with {@code that};
     *                   otherwise, compute the infimum
     * @return the least supertype or greatest subtype
     */
    final FunctionType supAndInfHelper(FunctionType that, boolean leastSuper) {
        // NOTE(nicksantos): When we remove the unknown type, the function types
        // form a lattice with the universal constructor at the top of the lattice,
        // and the LEAST_FUNCTION_TYPE type at the bottom of the lattice.
        //
        // When we introduce the unknown type, it's much more difficult to make
        // heads or tails of the partial ordering of types, because there's no
        // clear hierarchy between the different components (parameter types and
        // return types) in the ArrowType.
        //
        // Rather than make the situation more complicated by introducing new
        // types (like unions of functions), we just fallback on the simpler
        // approach of getting things right at the top and the bottom of the
        // lattice.
        //
        // If there are unknown parameters or return types making things
        // ambiguous, then sup(A, B) is always the top function type, and
        // inf(A, B) is always the bottom function type.
        checkNotNull(that);
        if (isEquivalentTo(that)) {
            return this;
        }
        // If these are ordinary functions, then merge them.
        // Don't do this if any of the params/return values are unknown, because
        // then there will be cycles in their local lattice and they will merge
        // in weird ways.
        if (isOrdinaryFunction() && that.isOrdinaryFunction()
                && !this.call.hasUnknownParamsOrReturn()
                && !that.call.hasUnknownParamsOrReturn()) {
            // Check for the degenerate case, but double check that there's not a cycle.
            boolean isSubtypeOfThat = isSubtype(that);
            boolean isSubtypeOfThis = that.isSubtype(this);
            if (isSubtypeOfThat && !isSubtypeOfThis) {
                return leastSuper ? that : this;
            } else if (isSubtypeOfThis && !isSubtypeOfThat) {
                return leastSuper ? this : that;
            }
            // Merge the two functions component-wise.
            FunctionType merged = tryMergeFunctionPiecewise(that, leastSuper);
            if (merged != null) {
                return merged;
            }
        }
        // The function instance type is a special case that lives above the rest
        // of the lattice.
        JSType functionInstance = registry.getNativeType(JSTypeNative.FUNCTION_INSTANCE_TYPE);
        if (functionInstance.isEquivalentTo(that)) {
            return leastSuper ? that : this;
        } else if (functionInstance.isEquivalentTo(this)) {
            return leastSuper ? this : that;
        }
        // In theory, we should be using the GREATEST_FUNCTION_TYPE as the
        // greatest function. In practice, we don't because it's way too broad.
        // The greatest function takes var_args None parameters, which means that
        // all parameters register a type warning.
        //
        // Instead, we use the U2U ctor type, which has unknown type args.
        FunctionType greatestFn = registry.getNativeFunctionType(JSTypeNative.U2U_CONSTRUCTOR_TYPE);
        FunctionType leastFn = registry.getNativeFunctionType(JSTypeNative.LEAST_FUNCTION_TYPE);
        return leastSuper ? greatestFn : leastFn;
    }
}
public class SQSMessageProducer {
    /**
     * Converts JMS message properties into SQS message attributes. Not verified
     * on the client side, but SQS attribute names must be valid letters or digits
     * on the basic multilingual plane in addition to allowing '_', '-' and '.'.
     * No component of an attribute name may be empty, thus an attribute name may
     * neither start nor end in '.'.
     *
     * @param message the JMS message whose properties should be mapped
     * @return map of attribute name to SQS attribute value
     * @throws JMSException if property access fails
     */
    Map<String, MessageAttributeValue> propertyToMessageAttribute(SQSMessage message) throws JMSException {
        Map<String, MessageAttributeValue> messageAttributes = new HashMap<String, MessageAttributeValue>();
        Enumeration<String> propertyNames = message.getPropertyNames();
        while (propertyNames.hasMoreElements()) {
            String propertyName = propertyNames.nextElement();
            // This is generated from SQS message attribute "ApproximateReceiveCount";
            // skip it rather than echoing it back.
            if (propertyName.equals(SQSMessagingClientConstants.JMSX_DELIVERY_COUNT)) {
                continue;
            }
            // This property will be used as the DeduplicationId argument of the
            // SendMessage call; on receive it is mapped back to this JMS property.
            if (propertyName.equals(SQSMessagingClientConstants.JMS_SQS_DEDUPLICATION_ID)) {
                continue;
            }
            // The JMSXGroupID and JMSXGroupSeq are always stored as message
            // properties, so they are not lost between send and receive, even
            // though SQS Classic does not respect those values when returning
            // messages and SQS FIFO has a different understanding of message groups.
            JMSMessagePropertyValue propertyObject = message.getJMSMessagePropertyValue(propertyName);
            MessageAttributeValue messageAttributeValue = new MessageAttributeValue();
            messageAttributeValue.setDataType(propertyObject.getType());
            messageAttributeValue.setStringValue(propertyObject.getStringMessageAttributeValue());
            messageAttributes.put(propertyName, messageAttributeValue);
        }
        return messageAttributes;
    }
}
public class HttpClientVerifyBuilder { /** * Adds parameter condition . Parameter value must match . * @ param name parameter name * @ param matcher parameter value matcher * @ return verification builder */ public HttpClientVerifyBuilder withParameter ( String name , Matcher < String > matcher ) { } }
ruleBuilder . addParameterCondition ( name , matcher ) ; return this ;
public class BaseAttribute {
    /**
     * Validates and stores a new value, updating the dirty flag relative to the
     * base value and firing a property change only when the value actually changed.
     * TODO(dk): think about specific method versions for each allowed type.
     */
    public void setValue(Object newValue) {
        Object checkedValue = checkValue(newValue);
        // Dirty means "differs from the base value", independent of the current value.
        setDirty(isDifferent(baseValue, checkedValue));
        if (isDifferent(value, checkedValue)) { // firePropertyChange doesn't do this check sufficiently
            firePropertyChange(VALUE, value, value = checkedValue); // set inline to avoid recursion
        }
    }
}
public class Database {
    /**
     * Prepare and return SQLite3 statement for SQL. Only available
     * in SQLite 3.0 and above, otherwise a no-op.
     *
     * @param sql SQL statement to be prepared
     * @return a Stmt object
     * @throws jsqlite.Exception if preparation fails
     */
    public Stmt prepare(String sql) throws jsqlite.Exception {
        // Serialize access to the native handle while preparing.
        synchronized (this) {
            Stmt stmt = new Stmt();
            stmt_prepare(sql, stmt); // native call fills in the statement handle
            return stmt;
        }
    }
}
public class Converter { /** * convertBase64ToHex */ private static void convertDateString ( String str ) { } }
try { Date date = Utils . dateFromString ( str ) ; display ( "Date to millis: " + date . getTime ( ) ) ; } catch ( Exception e ) { }
public class UCaseProps {
    /**
     * Is the current position followed by one or more characters with
     * combining class 230 (Above), before the next base character?
     *
     * @param iter context iterator positioned at the character in question; may be null
     * @return true if at least one cc==230 character follows before the next base character
     */
    private final boolean isFollowedByMoreAbove(ContextIterator iter) {
        int c;
        int dotType;
        if (iter == null) {
            return false;
        }
        // Scan forward from the current position.
        for (iter.reset(1); (c = iter.next()) >= 0; ) {
            dotType = getDotType(c);
            if (dotType == ABOVE) {
                return true; /* at least one cc==230 following */
            } else if (dotType != OTHER_ACCENT) {
                return false; /* next base character, no more cc==230 following */
            }
        }
        return false; /* no more cc==230 following */
    }
}
public class ResultCodeMapper {
    /**
     * Checks the result code and raises an exception if not {@link #MDB_SUCCESS}.
     * Known LMDB result codes map to dedicated exception types; anything else is
     * resolved via the OS errno constants table.
     *
     * @param rc the LMDB result code
     * @throws LmdbNativeException the resolved exception
     * @throws IllegalArgumentException if the code is neither an LMDB code nor a
     *         known OS constant
     */
    static void checkRc(final int rc) throws LmdbNativeException {
        // Map each documented LMDB return code to its dedicated exception type.
        switch (rc) {
            case MDB_SUCCESS:
                return;
            case Dbi.BadDbiException.MDB_BAD_DBI:
                throw new Dbi.BadDbiException();
            case BadReaderLockException.MDB_BAD_RSLOT:
                throw new BadReaderLockException();
            case BadException.MDB_BAD_TXN:
                throw new BadException();
            case Dbi.BadValueSizeException.MDB_BAD_VALSIZE:
                throw new Dbi.BadValueSizeException();
            case LmdbNativeException.PageCorruptedException.MDB_CORRUPTED:
                throw new LmdbNativeException.PageCorruptedException();
            case Cursor.FullException.MDB_CURSOR_FULL:
                throw new Cursor.FullException();
            case Dbi.DbFullException.MDB_DBS_FULL:
                throw new Dbi.DbFullException();
            case Dbi.IncompatibleException.MDB_INCOMPATIBLE:
                throw new Dbi.IncompatibleException();
            case Env.FileInvalidException.MDB_INVALID:
                throw new Env.FileInvalidException();
            case Dbi.KeyExistsException.MDB_KEYEXIST:
                throw new Dbi.KeyExistsException();
            case Env.MapFullException.MDB_MAP_FULL:
                throw new Env.MapFullException();
            case Dbi.MapResizedException.MDB_MAP_RESIZED:
                throw new Dbi.MapResizedException();
            case Dbi.KeyNotFoundException.MDB_NOTFOUND:
                throw new Dbi.KeyNotFoundException();
            case LmdbNativeException.PageFullException.MDB_PAGE_FULL:
                throw new LmdbNativeException.PageFullException();
            case LmdbNativeException.PageNotFoundException.MDB_PAGE_NOTFOUND:
                throw new LmdbNativeException.PageNotFoundException();
            case LmdbNativeException.PanicException.MDB_PANIC:
                throw new LmdbNativeException.PanicException();
            case Env.ReadersFullException.MDB_READERS_FULL:
                throw new Env.ReadersFullException();
            case LmdbNativeException.TlsFullException.MDB_TLS_FULL:
                throw new LmdbNativeException.TlsFullException();
            case TxFullException.MDB_TXN_FULL:
                throw new TxFullException();
            case Env.VersionMismatchException.MDB_VERSION_MISMATCH:
                throw new Env.VersionMismatchException();
            default:
                break;
        }
        // Not an LMDB-specific code: try to resolve it as an OS errno constant.
        final Constant constant = CONSTANTS.getConstant(rc);
        if (constant == null) {
            throw new IllegalArgumentException("Unknown result code " + rc);
        }
        final String msg = constant.name() + " " + constant.toString();
        throw new LmdbNativeException.ConstantDerviedException(rc, msg);
    }
}
public class GetIdentityMailFromDomainAttributesResult { /** * A map of identities to custom MAIL FROM attributes . * @ param mailFromDomainAttributes * A map of identities to custom MAIL FROM attributes . */ public void setMailFromDomainAttributes ( java . util . Map < String , IdentityMailFromDomainAttributes > mailFromDomainAttributes ) { } }
this . mailFromDomainAttributes = mailFromDomainAttributes == null ? null : new com . amazonaws . internal . SdkInternalMap < String , IdentityMailFromDomainAttributes > ( mailFromDomainAttributes ) ;
public class Timer {
    /**
     * Times and records the duration of an event.
     *
     * @param event a {@link Callable} whose {@link Callable#call()} method implements a process
     *              whose duration should be timed
     * @param <T>   the type of the value returned by {@code event}
     * @return the value returned by {@code event}
     * @throws Exception if {@code event} throws an {@link Exception}
     */
    public <T> T time(Callable<T> event) throws Exception {
        final long startTime = clock.getTick();
        try {
            return event.call();
        } finally {
            // Record the elapsed time even when the event throws.
            update(clock.getTick() - startTime);
        }
    }
}
public class MkCoPTree {
    /**
     * Approximates the lower hull: fits a line (in log-log space) minimizing the
     * sum of squared errors over k in [k_0, kmax], considering both hull segments
     * and per-point optimized slopes.
     *
     * @param convexHull     the convex hull of the (log k, log kDist) points
     * @param log_k          log of the k values
     * @param sum_log_k      sum of log_k over the fit range
     * @param sum_log_k2     sum of log_k squared over the fit range
     * @param log_kDist      log of the k-distances
     * @param sum_log_kDist  sum of log_kDist over the fit range
     * @param sum_log_k_kDist sum of log_k * log_kDist over the fit range
     * @return the best-fitting approximation line for the lower hull
     */
    private ApproximationLine approximateLowerHull(ConvexHull convexHull, double[] log_k, double sum_log_k,
            double sum_log_k2, double[] log_kDist, double sum_log_kDist, double sum_log_k_kDist) {
        // StringBuilder msg = new StringBuilder(1000);
        int[] lowerHull = convexHull.getLowerHull();
        int l = convexHull.getNumberOfPointsInLowerHull();
        int k_0 = settings.kmax - lowerHull.length + 1;
        // linear search on all line segments on the lower convex hull
        // msg.append("lower hull l = ").append(l).append('\n');
        double low_error = Double.MAX_VALUE;
        double low_m = 0.0;
        double low_t = 0.0;
        for (int i = 1; i < l; i++) {
            // Slope and intercept of the segment between hull points i-1 and i.
            double cur_m = (log_kDist[lowerHull[i]] - log_kDist[lowerHull[i - 1]])
                    / (log_k[lowerHull[i]] - log_k[lowerHull[i - 1]]);
            double cur_t = log_kDist[lowerHull[i]] - cur_m * log_k[lowerHull[i]];
            double cur_error = ssqerr(k_0, settings.kmax, log_k, log_kDist, cur_m, cur_t);
            // msg.append(" Segment = ").append(i).append(" m = ").append(cur_m)
            //    .append(" t = ").append(cur_t).append(" lowerror = ").append(cur_error).append('\n');
            if (cur_error < low_error) {
                low_error = cur_error;
                low_m = cur_m;
                low_t = cur_t;
            }
        }
        // linear search on all points of the lower convex hull
        boolean is_right = true; // NEEDED FOR PROOF CHECK
        for (int i = 0; i < l; i++) {
            // Optimal slope for a line anchored at hull point i.
            double cur_m = optimize(k_0, settings.kmax, sum_log_k, sum_log_k2, log_k[lowerHull[i]],
                    log_kDist[lowerHull[i]], sum_log_k_kDist, sum_log_kDist);
            double cur_t = log_kDist[lowerHull[i]] - cur_m * log_k[lowerHull[i]];
            // only valid if both neighboring points are underneath y = mx + t
            if ((i == 0
                    || log_kDist[lowerHull[i - 1]] >= log_kDist[lowerHull[i]]
                            - cur_m * (log_k[lowerHull[i]] - log_k[lowerHull[i - 1]]))
                    && (i == l - 1
                            || log_kDist[lowerHull[i + 1]] >= log_kDist[lowerHull[i]]
                                    + cur_m * (log_k[lowerHull[i + 1]] - log_k[lowerHull[i]]))) {
                double cur_error = ssqerr(k_0, settings.kmax, log_k, log_kDist, cur_m, cur_t);
                if (cur_error < low_error) {
                    low_error = cur_error;
                    low_m = cur_m;
                    low_t = cur_t;
                }
            }
            // check proof of bisection search
            if (!(i > 0
                    && log_kDist[lowerHull[i - 1]] < log_kDist[lowerHull[i]]
                            - cur_m * (log_k[lowerHull[i]] - log_k[lowerHull[i - 1]]))
                    && !is_right) {
                LOG.warning("ERROR lower: The bisection search will not work properly!");
                if (!(i < l - 1
                        && log_kDist[lowerHull[i + 1]] < log_kDist[lowerHull[i]]
                                + cur_m * (log_k[lowerHull[i + 1]] - log_k[lowerHull[i]]))) {
                    is_right = false;
                }
            }
        }
        return new ApproximationLine(k_0, low_m, low_t);
    }
}
public class Transaction {
    /**
     * Adds the given output to this transaction. The output must be completely
     * initialized. Returns the given output.
     *
     * @param to the fully initialized output to append
     * @return the same output, reparented to this transaction
     */
    public TransactionOutput addOutput(TransactionOutput to) {
        unCache(); // invalidate any cached serialized form
        to.setParent(this);
        outputs.add(to);
        adjustLength(outputs.size(), to.length); // keep the cached message length in sync
        return to;
    }
}
public class RecordMapper {
    /**
     * Implement in inner class in domain class decorator to ensure access to
     * private and protected fields.
     *
     * @param field  the reflective field to set
     * @param object the value to assign
     * @throws IllegalAccessException if the field cannot be written
     */
    protected void setField(Field field, Object object) throws IllegalAccessException {
        // Delegate to the domain object so private/protected fields remain writable.
        dataObject.setField(field, object);
    }
}
public class JcsegServer {
    /**
     * Reset a JcsegTaskConfig from a JSONObject: each recognized "jcseg_*" key,
     * when present, overrides the corresponding config setting; absent keys
     * leave the config untouched.
     *
     * @param config the config to update in place
     * @param json   the JSON object carrying override values
     */
    private void resetJcsegTaskConfig(JcsegTaskConfig config, JSONObject json) {
        if (json.has("jcseg_maxlen")) {
            config.setMaxLength(json.getInt("jcseg_maxlen"));
        }
        if (json.has("jcseg_icnname")) {
            config.setICnName(json.getBoolean("jcseg_icnname"));
        }
        if (json.has("jcseg_pptmaxlen")) {
            config.setPPT_MAX_LENGTH(json.getInt("jcseg_pptmaxlen"));
        }
        if (json.has("jcseg_cnmaxlnadron")) {
            config.setMaxCnLnadron(json.getInt("jcseg_cnmaxlnadron"));
        }
        if (json.has("jcseg_clearstopword")) {
            config.setClearStopwords(json.getBoolean("jcseg_clearstopword"));
        }
        if (json.has("jcseg_cnnumtoarabic")) {
            config.setCnNumToArabic(json.getBoolean("jcseg_cnnumtoarabic"));
        }
        if (json.has("jcseg_cnfratoarabic")) {
            config.setCnFactionToArabic(json.getBoolean("jcseg_cnfratoarabic"));
        }
        if (json.has("jcseg_keepunregword")) {
            config.setKeepUnregWords(json.getBoolean("jcseg_keepunregword"));
        }
        if (json.has("jcseg_ensencondseg")) {
            config.setEnSecondSeg(json.getBoolean("jcseg_ensencondseg"));
        }
        if (json.has("jcseg_stokenminlen")) {
            config.setSTokenMinLen(json.getInt("jcseg_stokenminlen"));
        }
        if (json.has("jcseg_nsthreshold")) {
            config.setNameSingleThreshold(json.getInt("jcseg_nsthreshold"));
        }
        if (json.has("jcseg_keeppunctuations")) {
            config.setKeepPunctuations(json.getString("jcseg_keeppunctuations"));
        }
    }
}
public class LdapConnection {
    /**
     * Get the unique name for the specified distinguished name.
     * When RDN translation is configured for the entity type, the RDN attribute
     * values in the DN are replaced with their WIM property equivalents; otherwise
     * the (node-switched) DN itself is returned.
     *
     * @param dn         The distinguished name.
     * @param entityType The entity type for the distinguished name; may be null,
     *                   in which case no translation is attempted.
     * @param attrs      The attributes for the entity; may be null, in which case
     *                   any required RDN attributes are fetched from the directory.
     * @return The unique name (never null; falls back to the DN itself).
     * @throws WIMException If there was an error retrieving portions of the unique name.
     */
    private String getUniqueName(String dn, String entityType, Attributes attrs) throws WIMException {
        final String METHODNAME = "getUniqueName";
        String uniqueName = null;
        // Normalize the DN to the configured repository node before any comparison.
        dn = iLdapConfigMgr.switchToNode(dn);
        // Translation runs only if enabled globally AND for this entity type.
        if (iLdapConfigMgr.needTranslateRDN() && iLdapConfigMgr.needTranslateRDN(entityType)) {
            try {
                if (entityType != null) {
                    LdapEntity ldapEntity = iLdapConfigMgr.getLdapEntity(entityType);
                    if (ldapEntity != null) {
                        // RDN attribute names as they appear in the actual DN.
                        String[] rdnName = LdapHelper.getRDNAttributes(dn);
                        // Parallel arrays: WIM property names, their LDAP attribute
                        // names, and the configured RDN attribute sets per variant.
                        String[][] rdnWIMProps = ldapEntity.getWIMRDNProperties();
                        String[][] rdnWIMAttrs = ldapEntity.getWIMRDNAttributes();
                        String[][] rdnAttrs = ldapEntity.getRDNAttributes();
                        Attribute[] rdnAttributes = new Attribute[rdnWIMProps.length];
                        String[] rdnAttrValues = new String[rdnWIMProps.length];
                        for (int i = 0; i < rdnAttrs.length; i++) {
                            String[] rdnAttr = rdnAttrs[i];
                            // Check whether this configured RDN variant matches the
                            // DN's actual RDN attribute names (case-insensitive).
                            boolean isRDN = true;
                            for (int j = 0; j < rdnAttr.length; j++) {
                                if (!rdnAttr[j].equalsIgnoreCase(rdnName[j])) {
                                    isRDN = false;
                                }
                            }
                            if (isRDN) {
                                String[] rdnWIMProp = rdnWIMProps[i];
                                String[] rdnWIMAttr = rdnWIMAttrs[i];
                                // Fetch attributes from LDAP only if the caller did
                                // not supply all RDN attributes we need.
                                boolean retrieveRDNs = false;
                                if (attrs == null) {
                                    retrieveRDNs = true;
                                } else {
                                    for (int k = 0; k < rdnWIMAttr.length; k++) {
                                        if (attrs.get(rdnWIMAttr[k]) == null) {
                                            retrieveRDNs = true;
                                            break;
                                        }
                                    }
                                }
                                if (retrieveRDNs) {
                                    // NOTE: replaces the caller-supplied attrs for the
                                    // remainder of the loop.
                                    attrs = getAttributes(dn, rdnWIMAttr);
                                }
                                // Collect the RDN attribute values; missing attributes
                                // leave the corresponding slot null.
                                for (int k = 0; k < rdnWIMAttr.length; k++) {
                                    rdnAttributes[k] = attrs.get(rdnWIMAttr[k]);
                                    if (rdnAttributes[k] != null) {
                                        rdnAttrValues[k] = (String) rdnAttributes[k].get();
                                    }
                                }
                                // Rebuild the DN with WIM property names as the RDN.
                                uniqueName = LdapHelper.replaceRDN(dn, rdnWIMProp, rdnAttrValues);
                            }
                        }
                    }
                }
            } catch (NamingException e) {
                String msg = Tr.formatMessage(tc, WIMMessageKey.NAMING_EXCEPTION, WIMMessageHelper.generateMsgParms(e.toString(true)));
                throw new WIMSystemException(WIMMessageKey.NAMING_EXCEPTION, msg, e);
            }
        }
        if (uniqueName == null) {
            // No translation happened (or none matched): the DN is the unique name.
            uniqueName = dn;
        } else {
            if (tc.isDebugEnabled()) {
                Tr.debug(tc, METHODNAME + " Translated uniqueName: " + uniqueName);
            }
        }
        return uniqueName;
    }
}
public class MortarScope { /** * Returns the service associated with the given name . * @ throws IllegalArgumentException if no such service can be found * @ throws IllegalStateException if this scope is dead * @ see # hasService */ public < T > T getService ( String serviceName ) { } }
T service = findService ( serviceName , true ) ; if ( service == null ) { throw new IllegalArgumentException ( format ( "No service found named \"%s\"" , serviceName ) ) ; } return service ;
public class ScanSpec { /** * Applicable only when an expression has been specified . Used to * specify the actual values for the attribute - value placeholders . * @ see ScanRequest # withExpressionAttributeValues ( Map ) */ public ScanSpec withValueMap ( Map < String , Object > valueMap ) { } }
if ( valueMap == null ) this . valueMap = null ; else this . valueMap = Collections . unmodifiableMap ( new LinkedHashMap < String , Object > ( valueMap ) ) ; return this ;
public class NetworkUtils { /** * Finds an available port . * @ param maxRetries * the maximum number of retries before an { @ link org . junit . internal . AssumptionViolatedException } is * thrown . * @ return the number of the port that is available */ public static int findAvailablePort ( int maxRetries ) { } }
int retries = 0 ; int randomPort ; boolean portAvailable ; do { randomPort = randomPort ( ) ; portAvailable = isPortAvailable ( randomPort ) ; retries ++ ; } while ( retries <= maxRetries && ! portAvailable ) ; assumeTrue ( "no open port found" , portAvailable ) ; return randomPort ;
public class BaseTraceService {
    /**
     * Inject the internal WsMessageRouter and hand it ownership of any messages
     * buffered before the router was available.
     *
     * @param msgRouter the router to install; receives the earlierMessages queue
     *                  (or null when no messages were buffered)
     */
    protected void setWsMessageRouter(WsMessageRouter msgRouter) {
        internalMessageRouter.set(msgRouter);
        // Pass the earlierMessages queue to the router.
        // Now that the internalMessageRouter is non-null, this class will
        // NOT add any more messages to the earlierMessages queue.
        // The MessageRouter basically owns the earlierMessages queue
        // from now on.
        if (earlierMessages != null) {
            // Double-check under the lock: the unsynchronized read above is only a
            // fast-path filter. NOTE(review): nothing in this block nulls
            // earlierMessages, so the inner re-check guards against a concurrent
            // writer elsewhere in the class — confirm against the field's other users.
            synchronized (this) {
                if (earlierMessages != null) {
                    msgRouter.setEarlierMessages(earlierMessages);
                }
            }
        } else {
            // Explicitly tell the router there is no backlog to replay.
            msgRouter.setEarlierMessages(null);
        }
    }
}
public class BaseAsyncInterceptor { /** * Used internally to set up the interceptor . */ @ Override public final void setNextInterceptor ( AsyncInterceptor nextInterceptor ) { } }
this . nextInterceptor = nextInterceptor ; this . nextDDInterceptor = nextInterceptor instanceof DDAsyncInterceptor ? ( DDAsyncInterceptor ) nextInterceptor : null ;
public class AbstractProxyFactory { /** * Returns the constructor of the indirection handler class . * @ return The constructor for indirection handlers */ private synchronized Constructor getIndirectionHandlerConstructor ( ) { } }
if ( _indirectionHandlerConstructor == null ) { Class [ ] paramType = { PBKey . class , Identity . class } ; try { _indirectionHandlerConstructor = getIndirectionHandlerClass ( ) . getConstructor ( paramType ) ; } catch ( NoSuchMethodException ex ) { throw new MetadataException ( "The class " + _indirectionHandlerClass . getName ( ) + " specified for IndirectionHandlerClass" + " is required to have a public constructor with signature (" + PBKey . class . getName ( ) + ", " + Identity . class . getName ( ) + ")." ) ; } } return _indirectionHandlerConstructor ;
public class ApiOvhDedicatedCloud { /** * Get this object properties * REST : GET / dedicatedCloud / location / { pccZone } / hostProfile / { id } * @ param pccZone [ required ] Name of pccZone * @ param id [ required ] Id of Host profile */ public OvhHostProfile location_pccZone_hostProfile_id_GET ( String pccZone , Long id ) throws IOException { } }
String qPath = "/dedicatedCloud/location/{pccZone}/hostProfile/{id}" ; StringBuilder sb = path ( qPath , pccZone , id ) ; String resp = exec ( qPath , "GET" , sb . toString ( ) , null ) ; return convertTo ( resp , OvhHostProfile . class ) ;
public class SeaGlassLookAndFeel {
    /**
     * Initialize the spinner UI settings: base palette colors, content margins,
     * and the lazily-constructed state painters for the spinner's formatted
     * text field and previous/next buttons.
     *
     * @param d the UI defaults map.
     */
    private void defineSpinners(UIDefaults d) {
        // --- Base palette colors shared by the spinner button painters ---
        d.put("spinnerNextBorderBottomEnabled", new Color(0x4779bf));
        d.put("spinnerNextBorderBottomPressed", new Color(0x4879bf));
        d.put("spinnerNextInteriorBottomEnabled", new Color(0x85abcf));
        d.put("spinnerNextInteriorBottomPressed", new Color(0x6e92b6));
        d.put("spinnerPrevBorderTopEnabled", new Color(0x4778bf));
        d.put("spinnerPrevInteriorTopEnabled", new Color(0x81aed4));
        d.put("spinnerPrevInteriorBottomEnabled", new Color(0xaad4f1));
        d.put("spinnerPrevInteriorPressedTop", new Color(0x6c91b8));
        d.put("spinnerPrevInteriorPressedBottom", new Color(0x9cc3de));
        d.put("spinnerPrevTopLineEnabled", new Color(0xacc8e0));
        d.put("spinnerPrevTopLinePressed", new Color(0x9eb6cf));
        // --- Content margins for the spinner and its editor components ---
        d.put("Spinner.contentMargins", new InsetsUIResource(4, 6, 4, 6));
        d.put("Spinner:\"Spinner.editor\".contentMargins", new InsetsUIResource(0, 0, 0, 0));
        d.put("Spinner:\"Spinner.textField\".contentMargins", new InsetsUIResource(4, 6, 4, 0));
        d.put("Spinner:\"Spinner.formattedTextField\".contentMargins", new InsetsUIResource(4, 6, 4, 2));
        // --- Formatted text field: colors and per-state background painters.
        //     c/p are reused below for each painter/prefix pair.
        String c = PAINTER_PREFIX + "SpinnerFormattedTextFieldPainter";
        String p = "Spinner:Panel:\"Spinner.formattedTextField\"";
        d.put(p + ".contentMargins", new InsetsUIResource(3, 10, 3, 2));
        d.put(p + ".background", Color.WHITE);
        d.put(p + "[Selected].textForeground", Color.WHITE);
        d.put(p + "[Selected].textBackground", d.get("seaGlassSelection"));
        d.put(p + "[Disabled].textForeground", getDerivedColor("seaGlassDisabledText", 0.0f, 0.0f, 0.0f, 0, true));
        d.put(p + "[Disabled].backgroundPainter", new LazyPainter(c, SpinnerFormattedTextFieldPainter.Which.BACKGROUND_DISABLED));
        d.put(p + "[Enabled].backgroundPainter", new LazyPainter(c, SpinnerFormattedTextFieldPainter.Which.BACKGROUND_ENABLED));
        d.put(p + "[Focused].backgroundPainter", new LazyPainter(c, SpinnerFormattedTextFieldPainter.Which.BACKGROUND_FOCUSED));
        d.put(p + "[Selected].backgroundPainter", new LazyPainter(c, SpinnerFormattedTextFieldPainter.Which.BACKGROUND_SELECTED));
        d.put(p + "[Focused+Selected].backgroundPainter", new LazyPainter(c, SpinnerFormattedTextFieldPainter.Which.BACKGROUND_SELECTED_FOCUSED));
        // --- Previous ("down") button: size, supported states, and per-state
        //     background/foreground painters.
        c = PAINTER_PREFIX + "SpinnerPreviousButtonPainter";
        p = "Spinner:\"Spinner.previousButton\"";
        d.put(p + ".size", new Integer(22));
        d.put(p + ".States", "Disabled,Enabled,Focused,Pressed");
        d.put(p + "[Disabled].backgroundPainter", new LazyPainter(c, SpinnerPreviousButtonPainter.Which.BACKGROUND_DISABLED));
        d.put(p + "[Enabled].backgroundPainter", new LazyPainter(c, SpinnerPreviousButtonPainter.Which.BACKGROUND_ENABLED));
        d.put(p + "[Focused].backgroundPainter", new LazyPainter(c, SpinnerPreviousButtonPainter.Which.BACKGROUND_FOCUSED));
        d.put(p + "[Focused+Pressed].backgroundPainter", new LazyPainter(c, SpinnerPreviousButtonPainter.Which.BACKGROUND_PRESSED_FOCUSED));
        d.put(p + "[Pressed].backgroundPainter", new LazyPainter(c, SpinnerPreviousButtonPainter.Which.BACKGROUND_PRESSED));
        d.put(p + "[Disabled].foregroundPainter", new LazyPainter(c, SpinnerPreviousButtonPainter.Which.FOREGROUND_DISABLED));
        d.put(p + "[Enabled].foregroundPainter", new LazyPainter(c, SpinnerPreviousButtonPainter.Which.FOREGROUND_ENABLED));
        d.put(p + "[Focused].foregroundPainter", new LazyPainter(c, SpinnerPreviousButtonPainter.Which.FOREGROUND_FOCUSED));
        d.put(p + "[Focused+Pressed].foregroundPainter", new LazyPainter(c, SpinnerPreviousButtonPainter.Which.FOREGROUND_PRESSED_FOCUSED));
        d.put(p + "[Pressed].foregroundPainter", new LazyPainter(c, SpinnerPreviousButtonPainter.Which.FOREGROUND_PRESSED));
        // --- Next ("up") button: mirrors the previous-button configuration.
        c = PAINTER_PREFIX + "SpinnerNextButtonPainter";
        p = "Spinner:\"Spinner.nextButton\"";
        d.put(p + ".size", new Integer(22));
        d.put(p + ".States", "Disabled,Enabled,Focused,Pressed");
        d.put(p + "[Disabled].backgroundPainter", new LazyPainter(c, SpinnerNextButtonPainter.Which.BACKGROUND_DISABLED));
        d.put(p + "[Enabled].backgroundPainter", new LazyPainter(c, SpinnerNextButtonPainter.Which.BACKGROUND_ENABLED));
        d.put(p + "[Focused].backgroundPainter", new LazyPainter(c, SpinnerNextButtonPainter.Which.BACKGROUND_FOCUSED));
        d.put(p + "[Focused+Pressed].backgroundPainter", new LazyPainter(c, SpinnerNextButtonPainter.Which.BACKGROUND_PRESSED_FOCUSED));
        d.put(p + "[Pressed].backgroundPainter", new LazyPainter(c, SpinnerNextButtonPainter.Which.BACKGROUND_PRESSED));
        d.put(p + "[Disabled].foregroundPainter", new LazyPainter(c, SpinnerNextButtonPainter.Which.FOREGROUND_DISABLED));
        d.put(p + "[Enabled].foregroundPainter", new LazyPainter(c, SpinnerNextButtonPainter.Which.FOREGROUND_ENABLED));
        d.put(p + "[Focused].foregroundPainter", new LazyPainter(c, SpinnerNextButtonPainter.Which.FOREGROUND_FOCUSED));
        d.put(p + "[Focused+Pressed].foregroundPainter", new LazyPainter(c, SpinnerNextButtonPainter.Which.FOREGROUND_PRESSED_FOCUSED));
        d.put(p + "[Pressed].foregroundPainter", new LazyPainter(c, SpinnerNextButtonPainter.Which.FOREGROUND_PRESSED));
    }
}