signature stringlengths 43 39.1k | implementation stringlengths 0 450k |
|---|---|
public class BizwifiAPI { /** * 连Wi - Fi小程序 - 设置顶部banner跳转小程序接口
* 场景介绍 :
* 用户连Wi - Fi后长期逗留在场所内 , 可以在连接Wi - Fi后进入微信点击微信聊首页欢迎语 , 即可进入预先设置的小程序中获得资讯或服务 。
* 注 : 只能跳转与公众号关联的小程序 。
* @ param accessToken accessToken
* @ param homePageSet homePageSet
* @ return BaseResult */
public static BaseResult homepageSet ( String accessToken , HomePageSet homePageSet ) { } } | return homepageSet ( accessToken , JsonUtil . toJSONString ( homePageSet ) ) ; |
public class FactorMaxMarginalSet {

    /**
     * Performs a depth-first search of {@code cliqueTree}, starting at
     * {@code factorNum}, to find an assignment with maximal probability. If
     * multiple maximal probability assignments exist, this method returns an
     * arbitrary one.
     *
     * @param cliqueTree factor graph which is searched
     * @param factorNum current factor to visit
     * @param visitedFactors set of factors already visited by the depth-first
     *        search; mutated in place as the search proceeds
     * @param a the maximal probability assignment for the already-visited factors
     * @return the best assignment covering this factor and all factors reachable
     *         from it, unioned with {@code a}
     * @throws ZeroProbabilityError if no positive-probability assignment exists
     */
    private static Assignment getBestAssignmentGiven(CliqueTree cliqueTree, int factorNum,
            Set<Integer> visitedFactors, Assignment a) {
        Factor curFactor = cliqueTree.getMarginal(factorNum);
        // Condition on what has been decided so far, then take the single most
        // likely completion of this factor's variables.
        List<Assignment> bestAssignments = curFactor.conditional(a).getMostLikelyAssignments(1);
        if (bestAssignments.size() == 0) {
            // This condition implies that the factor graph does not have a positive
            // probability assignment.
            throw new ZeroProbabilityError();
        }
        Assignment best = bestAssignments.get(0).union(a);
        if (curFactor.getUnnormalizedLogProbability(best) == Double.NEGATIVE_INFINITY) {
            // The locally-best completion still has zero probability overall.
            throw new ZeroProbabilityError();
        }
        // Mark this factor visited BEFORE recursing so neighbors do not revisit it.
        visitedFactors.add(factorNum);
        for (int adjacentFactorNum : cliqueTree.getNeighboringFactors(factorNum)) {
            if (!visitedFactors.contains(adjacentFactorNum)) {
                // Recurse, then strip the variables already fixed by `best` so the
                // union below only adds newly-decided variables.
                Assignment bestChild = getBestAssignmentGiven(cliqueTree, adjacentFactorNum,
                        visitedFactors, best).removeAll(best.getVariableNumsArray());
                best = best.union(bestChild);
            }
        }
        return best;
    }
}
public class ByteAmount {

    /**
     * Creates a ByteAmount value in megabytes. If the megabytes value
     * is >= Long.MAX_VALUE / 1024 / 1024, the byte representation is capped
     * at Long.MAX_VALUE to avoid overflow in the multiplication.
     *
     * @param megabytes value in megabytes to represent
     * @return a ByteAmount object representing the number of MBs passed
     */
    public static ByteAmount fromMegabytes(long megabytes) {
        if (megabytes >= MAX_MB) {
            // Multiplying would overflow a long; saturate instead.
            return new ByteAmount(Long.MAX_VALUE);
        } else {
            return new ByteAmount(megabytes * MB);
        }
    }
}
public class CommerceShipmentItemPersistenceImpl {

    /**
     * Clears the cache for the commerce shipment item.
     *
     * Both the {@link EntityCache} (keyed by primary key) and the
     * {@link FinderCache} (paginated and unpaginated list results) are
     * cleared by this method.
     */
    @Override
    public void clearCache(CommerceShipmentItem commerceShipmentItem) {
        // Evict the single-entity cache entry for this shipment item.
        entityCache.removeResult(CommerceShipmentItemModelImpl.ENTITY_CACHE_ENABLED,
                CommerceShipmentItemImpl.class, commerceShipmentItem.getPrimaryKey());
        // Any cached list result could contain this item, so drop them all.
        finderCache.clearCache(FINDER_CLASS_NAME_LIST_WITH_PAGINATION);
        finderCache.clearCache(FINDER_CLASS_NAME_LIST_WITHOUT_PAGINATION);
    }
}
public class SequenceManagerHelper { /** * Database sequence properties helper method .
* Return sequence < em > cycle < / em > Booelan or < em > null < / em >
* if not set .
* @ param prop The { @ link java . util . Properties } instance to use .
* @ return The found expression or < em > null < / em > . */
public static Boolean getSeqCycleValue ( Properties prop ) { } } | String result = prop . getProperty ( PROP_SEQ_CYCLE , null ) ; if ( result != null ) { return Boolean . valueOf ( result ) ; } else { return null ; } |
public class RoadPolyline {

    /**
     * Replies the last connection point of this segment.
     *
     * @param <CT> is the type of the connection to reply
     * @param connectionClass is the type of the connection to reply; must be
     *        assignable from {@code StandardRoadConnection} or
     *        {@code RoadConnectionWithArrivalSegment}
     * @return the last point, or <code>null</code> if this segment has no
     *         last connection
     * @throws IllegalArgumentException if {@code connectionClass} is not a
     *         supported RoadConnection type
     */
    @Pure
    <CT extends RoadConnection> CT getEndPoint(Class<CT> connectionClass) {
        final StandardRoadConnection connection = this.lastConnection;
        if (connection == null) {
            return null;
        }
        if (connectionClass.isAssignableFrom(StandardRoadConnection.class)) {
            // Caller accepts the raw connection; no wrapper needed.
            return connectionClass.cast(connection);
        }
        if (connectionClass.isAssignableFrom(RoadConnectionWithArrivalSegment.class)) {
            // Wrap so the connection knows it was reached via this segment's end
            // (false = not the start point).
            return connectionClass.cast(new RoadConnectionWithArrivalSegment(connection, this, false));
        }
        throw new IllegalArgumentException("unsupported RoadConnection class"); //$NON-NLS-1$
    }
}
public class ClassUtil {

    /**
     * Reads the value of {@code field} from {@code ref} via reflection,
     * forcing accessibility first so private fields can be read.
     *
     * This is deliberately best-effort: if the field cannot be read (wrong
     * receiver type, or access denied despite {@code setAccessible}), the
     * method returns {@code null} rather than throwing, matching the
     * historical contract relied on by callers.
     *
     * @param field a {@link java.lang.reflect.Field} object to read
     * @param ref the object to read the field from (may be {@code null} for
     *        static fields)
     * @return the field's value, or {@code null} if it could not be read
     */
    public static Object getValueOfField(Field field, Object ref) {
        field.setAccessible(true);
        try {
            return field.get(ref);
        } catch (IllegalArgumentException | IllegalAccessException e) {
            // Best effort: an unreadable field is reported as absent. The
            // original code silently swallowed these; keep that behavior but
            // make the intent explicit with a single multi-catch.
            return null;
        }
    }
}
public class MapFixture {

    /**
     * Gets value from map.
     *
     * @param name name of (possibly nested) property to get value from
     * @param map map to get value from
     * @return value found, if it could be found, null otherwise
     */
    public Object valueIn(String name, Map<String, Object> map) {
        // Delegates nested-property resolution entirely to the map helper.
        return getMapHelper().getValue(map, name);
    }
}
public class GeneratorUtil { /** * Computes foldername and creates the folders that does not already exist
* @ param inFolder , left out if null
* @ return */
private String getFolderAndCreateIfMissing ( String inFolder ) { } } | String folder = outputFolder ; if ( inFolder != null ) { folder += "/" + inFolder ; } mkdirs ( folder ) ; return folder ; |
public class CoverageTask {

    /**
     * Call the Coverage Task.
     *
     * Assembles the individual tiles described by the prepared tile info into
     * one backing image, then wraps that image in a GridCoverage2D positioned
     * by the prepared grid envelope.
     *
     * @return the assembled grid coverage
     */
    public GridCoverage2D call() {
        try {
            // Allocate the full-size canvas the tiles will be drawn onto.
            BufferedImage coverageImage = this.tiledLayer.createBufferedImage(
                    this.tilePreparationInfo.getImageWidth(),
                    this.tilePreparationInfo.getImageHeight());
            Graphics2D graphics = coverageImage.createGraphics();
            try {
                for (SingleTilePreparationInfo tileInfo : this.tilePreparationInfo.getSingleTiles()) {
                    final TileTask task;
                    if (tileInfo.getTileRequest() != null) {
                        // Tile has a backing request: fetch it (error image used on failure).
                        task = new SingleTileLoaderTask(tileInfo.getTileRequest(), this.errorImage,
                                tileInfo.getTileIndexX(), tileInfo.getTileIndexY(), this.failOnError,
                                this.registry, this.context);
                    } else {
                        // No request: fill the slot with the layer's missing-tile image.
                        task = new PlaceHolderImageTask(this.tiledLayer.getMissingTileImage(),
                                tileInfo.getTileIndexX(), tileInfo.getTileIndexY());
                    }
                    Tile tile = task.call();
                    if (tile.getImage() != null) {
                        // Position each tile by its (x, y) index scaled by the tile size.
                        graphics.drawImage(tile.getImage(),
                                tile.getxIndex() * this.tiledLayer.getTileSize().width,
                                tile.getyIndex() * this.tiledLayer.getTileSize().height, null);
                    }
                }
            } finally {
                // Always release the graphics context, even if a tile task fails.
                graphics.dispose();
            }
            GridCoverageFactory factory = CoverageFactoryFinder.getGridCoverageFactory(null);
            // Envelope in the map projection, spanning from the grid origin to its max corner.
            GeneralEnvelope gridEnvelope = new GeneralEnvelope(this.tilePreparationInfo.getMapProjection());
            gridEnvelope.setEnvelope(this.tilePreparationInfo.getGridCoverageOrigin().x,
                    this.tilePreparationInfo.getGridCoverageOrigin().y,
                    this.tilePreparationInfo.getGridCoverageMaxX(),
                    this.tilePreparationInfo.getGridCoverageMaxY());
            return factory.create(this.tiledLayer.createCommonUrl(), coverageImage, gridEnvelope,
                    null, null, null);
        } catch (Exception e) {
            // Re-wrap as an unchecked exception per the surrounding framework's convention.
            throw ExceptionUtils.getRuntimeException(e);
        }
    }
}
public class Matrix { /** * create and return the N - by - N identity matrix */
public static Matrix identity ( int N ) { } } | Matrix I = new Matrix ( N , N ) ; for ( int i = 0 ; i < N ; i ++ ) { I . data [ i ] [ i ] = 1 ; } return I ; |
public class MemcacheUtils { /** * Parse Memcache key into ( MapName , Key ) pair . */
public static MapNameAndKeyPair parseMemcacheKey ( String key ) { } } | key = decodeKey ( key , "UTF-8" ) ; String mapName = DEFAULT_MAP_NAME ; int index = key . indexOf ( ':' ) ; if ( index != - 1 ) { mapName = MAP_NAME_PREFIX + key . substring ( 0 , index ) ; key = key . substring ( index + 1 ) ; } return new MapNameAndKeyPair ( mapName , key ) ; |
public class PropertyMappingPanel {

    /**
     * Shows the context popup menu at the mouse position, but only if the
     * event is this platform's popup trigger (checked per-event because the
     * trigger differs between press and release across platforms).
     */
    private void showPopup(MouseEvent evt) {
        if (evt.isPopupTrigger()) {
            popMenu.show(evt.getComponent(), evt.getX(), evt.getY());
        }
    }
}
public class Utils {

    /**
     * Copies {@code value} onto the {@code name} property of {@code bean} via
     * its setter, converting the value to the field's declared type when the
     * runtime types do not match exactly.
     *
     * Silently returns (no-op) when the name or value is null, or when no
     * matching field exists on the bean's class.
     *
     * @param bean the target object
     * @param name the property/field name
     * @param value the value to assign (converted from its string form if its
     *        class differs from the field's type)
     * @throws Excel4JException if setter lookup or invocation fails
     */
    public static void copyProperty(Object bean, String name, Object value) throws Excel4JException {
        if (null == name || null == value) return;
        Field field = matchClassField(bean.getClass(), name);
        if (null == field) return;
        Method method;
        try {
            method = getterOrSetter(bean.getClass(), name, FieldAccessType.SETTER);
            if (value.getClass() == field.getType()) {
                // Exact type match: invoke the setter directly.
                method.invoke(bean, value);
            } else {
                // Mismatch: round-trip through the string form and convert to
                // the field's declared type.
                method.invoke(bean, str2TargetClass(value.toString(), field.getType()));
            }
        } catch (IntrospectionException | IllegalAccessException | InvocationTargetException e) {
            throw new Excel4JException(e);
        }
    }
}
public class QuartzSchedulerResources { /** * Set the name for the < code > { @ link QuartzSchedulerThread } < / code > .
* @ exception IllegalArgumentException
* if name is null or empty . */
public void setThreadName ( final String threadName ) { } } | if ( threadName == null || threadName . trim ( ) . length ( ) == 0 ) { throw new IllegalArgumentException ( "Scheduler thread name cannot be empty." ) ; } m_sThreadName = threadName ; |
public class NearestEdgeSnapAlgorithm {

    /**
     * Execute the snap operation.
     *
     * Scans every coordinate array: arrays of length > 1 are treated as
     * polylines (snapping to the nearest point on each edge), while arrays of
     * length 1 are treated as points. Updates the {@code calculatedDistance}
     * and {@code hasSnapped} fields as side effects.
     *
     * @param coordinate The original location.
     * @param distance The maximum distance allowed for snapping.
     * @return The new location. If no snapping target was found, this may
     *         return the original location.
     */
    public Coordinate snap(Coordinate coordinate, double distance) {
        // Some initialization:
        calculatedDistance = distance;
        hasSnapped = false;
        Coordinate snappingPoint = coordinate;
        // Calculate the distances for all coordinate arrays:
        for (Coordinate[] coordinateArray : coordinates) {
            if (coordinateArray.length > 1) {
                // Polyline case: test each edge (j-1, j).
                for (int j = 1; j < coordinateArray.length; j++) {
                    double d = MathService.distance(coordinateArray[j], coordinateArray[j - 1], coordinate);
                    // The "equal distance but not yet snapped" clause lets an edge at
                    // exactly the maximum distance still count as a snap target.
                    if (d < calculatedDistance || (d == calculatedDistance && !hasSnapped)) {
                        snappingPoint = MathService.nearest(coordinateArray[j], coordinateArray[j - 1], coordinate);
                        calculatedDistance = d;
                        hasSnapped = true;
                    }
                }
            } else if (coordinateArray.length == 1) {
                // In the case of Points, see if we can snap to them:
                double d = MathService.distance(coordinateArray[0], coordinate);
                if (d < calculatedDistance) {
                    snappingPoint = coordinateArray[0];
                    calculatedDistance = d;
                    hasSnapped = true;
                }
            }
        }
        return snappingPoint;
    }
}
public class DefaultGroovyMethods {

    /**
     * Swaps the elements at the two specified positions of the array,
     * in place, and returns the (same) array.
     *
     * Example:
     * <pre class="groovyTestCase">
     * assert (["a", "c", "b", "d"] as String[]) == (["a", "b", "c", "d"] as String[]).swap(1, 2)
     * </pre>
     *
     * @param self an array
     * @param i a position
     * @param j a position
     * @return self (the mutated input array)
     * @since 2.4.0
     */
    public static <T> T[] swap(T[] self, int i, int j) {
        final T held = self[i];
        self[i] = self[j];
        self[j] = held;
        return self;
    }
}
public class PropertyInfo { /** * This is a convenience method for returning a named configuration value that is expected to be
* a double floating point number .
* @ param key The configuration value ' s key .
* @ param dflt Default value .
* @ return Configuration value as a double or default value if not found or not a valid double . */
public Double getConfigValueDouble ( String key , Double dflt ) { } } | try { return Double . parseDouble ( getConfigValue ( key ) ) ; } catch ( Exception e ) { return dflt ; } |
public class PriorityQueue {

    /**
     * Blocks until a close operation has completed, i.e. the priority queue
     * has been drained. If the queue is already closed, this method returns
     * immediately.
     */
    public void waitForCloseToComplete() {
        // Entry/exit trace hooks follow the component's standard tracing pattern.
        if (TraceComponent.isAnyTracingEnabled() && tc.isEntryEnabled())
            SibTr.entry(this, tc, "waitForCloseToComplete");
        // Park on the close-waiters monitor until the closer signals completion.
        closeWaitersMonitor.waitOn();
        if (TraceComponent.isAnyTracingEnabled() && tc.isEntryEnabled())
            SibTr.exit(this, tc, "waitForCloseToComplete");
    }
}
public class JDBCConnection {

    /**
     * Creates a <code>Statement</code> object that will generate
     * <code>ResultSet</code> objects with the given type and concurrency.
     * This method is the same as the no-argument <code>createStatement</code>
     * method, but it allows the default result set type and concurrency to be
     * overridden. The holdability of the created result sets can be
     * determined by calling {@link #getHoldability}.
     *
     * <h3>HSQLDB-Specific Information:</h3>
     * HSQLDB supports <code>TYPE_FORWARD_ONLY</code>,
     * <code>TYPE_SCROLL_INSENSITIVE</code>, <code>CONCUR_READ_ONLY</code> and
     * <code>CONCUR_UPDATABLE</code> results. If an unsupported combination is
     * requested, a SQLWarning is issued on this Connection and the closest
     * supported combination is used instead.
     *
     * @param resultSetType a result set type; one of
     *        <code>ResultSet.TYPE_FORWARD_ONLY</code>,
     *        <code>ResultSet.TYPE_SCROLL_INSENSITIVE</code>, or
     *        <code>ResultSet.TYPE_SCROLL_SENSITIVE</code>
     * @param resultSetConcurrency a concurrency type; one of
     *        <code>ResultSet.CONCUR_READ_ONLY</code> or
     *        <code>ResultSet.CONCUR_UPDATABLE</code>
     * @return a new <code>Statement</code> object that will generate
     *         <code>ResultSet</code> objects with the given type and concurrency
     * @exception SQLException if a database access error occurs, this method
     *            is called on a closed connection, or the given parameters are
     *            not <code>ResultSet</code> constants indicating type and
     *            concurrency
     * @exception SQLFeatureNotSupportedException if the JDBC driver does not
     *            support this method or the specified result set type and
     *            concurrency
     * @since JDK 1.2
     */
    public synchronized Statement createStatement(int resultSetType,
            int resultSetConcurrency) throws SQLException {
        // Fail fast on a closed connection per the JDBC contract.
        checkClosed();
        // Holdability comes from this connection's current setting.
        return new JDBCStatement(this, resultSetType, resultSetConcurrency, rsHoldability);
    }
}
public class UpdateChecker { /** * Show the Notice only if it ' s the first time or the number of the checks made is a multiple of the argument of setSuccessfulChecksRequired ( int ) method . ( If you don ' t call setSuccessfulChecksRequired ( int ) the default is 5 ) . */
private boolean hasToShowNotice ( String versionDownloadable ) { } } | SharedPreferences prefs = mActivity . getSharedPreferences ( PREFS_FILENAME , 0 ) ; String prefKey = SUCCESSFUL_CHEKS_PREF_KEY + versionDownloadable ; int mChecksMade = prefs . getInt ( prefKey , 0 ) ; if ( mChecksMade % mSuccessfulChecksRequired == 0 || mChecksMade == 0 ) { saveNumberOfChecksForUpdatedVersion ( versionDownloadable , mChecksMade ) ; return true ; } else { saveNumberOfChecksForUpdatedVersion ( versionDownloadable , mChecksMade ) ; return false ; } |
public class FeatureList {

    /**
     * Concatenate successive portions of the specified sequence using the
     * feature locations in the list. The list is assumed to be appropriately
     * ordered.
     *
     * @param sequence The source sequence from which portions should be selected.
     * @return The spliced data.
     * @throws IllegalStateException Out of order or overlapping FeatureI
     *         locations detected.
     */
    public String splice(DNASequence sequence) {
        StringBuilder subData = new StringBuilder();
        Location last = null;
        for (FeatureI f : this) {
            Location loc = f.location();
            // Each feature must start strictly after the previous one ended;
            // the first feature (last == null) is always accepted.
            if (last == null || loc.startsAfter(last)) {
                subData.append(sequence.getSubSequence(loc.start(), loc.end()).toString());
                last = loc;
            } else {
                throw new IllegalStateException("Splice: Feature locations should not overlap.");
            }
        }
        return subData.toString();
    }
}
public class SQLiteDatabaseSchema {

    /**
     * Reconciles a configured class name with a detected one.
     *
     * @param configClazz the class name from configuration
     * @param clazz the class name to compare against
     * @return the configured class name when the two differ, or null when
     *         they are equal (meaning no override is needed)
     */
    private String fillClazz(String configClazz, String clazz) {
        return clazz.equals(configClazz) ? null : configClazz;
    }
}
public class UUID { /** * Static factory to retrieve a type 4 ( pseudo randomly generated ) UUID .
* The < code > UUID < / code > is generated using a cryptographically strong
* pseudo random number generator .
* Source code was got from sources of java
* @ return a randomly generated < tt > UUID < / tt > . */
public static byte [ ] randomUUID ( ) { } } | SecureRandom ng = numberGenerator ; if ( ng == null ) { numberGenerator = ng = new SecureRandom ( ) ; } byte [ ] randomBytes = new byte [ 16 ] ; ng . nextBytes ( randomBytes ) ; randomBytes [ 6 ] &= 0x0f ; /* clear version */
randomBytes [ 6 ] |= 0x40 ; /* set to version 4 */
randomBytes [ 8 ] &= 0x3f ; /* clear variant */
randomBytes [ 8 ] |= 0x80 ; /* set to IETF variant */
return randomBytes ; |
public class AutoscalePolicyService {

    /**
     * Removes the autoscale policy from a server on the remote service.
     *
     * @param server server to remove the policy from
     * @return OperationFuture wrapper for server; completes immediately
     *         (no-wait job) since the removal call is synchronous
     */
    public OperationFuture<Server> removeAutoscalePolicyOnServer(Server server) {
        // Resolve the server reference to its concrete id before calling the client.
        autoscalePolicyClient.removeAutoscalePolicyOnServer(serverService.findByRef(server).getId());
        return new OperationFuture<>(server, new NoWaitingJobFuture());
    }
}
public class ResourceFinder { /** * Reads the contents of all non - directory URLs immediately under the specified
* location and returns them in a map keyed by the file name .
* Any URLs that cannot be read will cause an exception to be thrown .
* Example classpath :
* META - INF / serializables / one
* META - INF / serializables / two
* META - INF / serializables / three
* META - INF / serializables / four / foo . txt
* ResourceFinder finder = new ResourceFinder ( " META - INF / " ) ;
* Map map = finder . mapAvailableStrings ( " serializables " ) ;
* map . contains ( " one " ) ; / / true
* map . contains ( " two " ) ; / / true
* map . contains ( " three " ) ; / / true
* map . contains ( " four " ) ; / / false
* @ param uri
* @ return a list of the content of each resource URL found
* @ throws IOException if any of the urls cannot be read */
public Map < String , String > mapAllStrings ( String uri ) throws IOException { } } | Map < String , String > strings = new HashMap < > ( ) ; Map < String , URL > resourcesMap = getResourcesMap ( uri ) ; for ( Iterator iterator = resourcesMap . entrySet ( ) . iterator ( ) ; iterator . hasNext ( ) ; ) { Map . Entry entry = ( Map . Entry ) iterator . next ( ) ; String name = ( String ) entry . getKey ( ) ; URL url = ( URL ) entry . getValue ( ) ; String value = readContents ( url ) ; strings . put ( name , value ) ; } return strings ; |
public class ValidateMetadataDeployer {

    /**
     * {@inheritDoc}
     *
     * Validates the connector metadata attached to the deployment context,
     * preferring the merged metadata over the raw ra.xml metadata. Produces
     * no deployment of its own (always returns null on success).
     *
     * @throws DeployException if no metadata is attached, or if validation fails
     */
    public Deployment deploy(URL url, Context context, ClassLoader parent) throws DeployException {
        // Prefer merged metadata; fall back to the plain ra.xml attachment.
        Connector c = (Connector) context.get(Constants.ATTACHMENT_MERGED_METADATA);
        if (c == null)
            c = (Connector) context.get(Constants.ATTACHMENT_RA_XML_METADATA);
        if (c == null)
            throw new DeployException("No metadata for " + url.toExternalForm() + " found");
        try {
            c.validate();
            // Validation-only deployer: nothing to return.
            return null;
        } catch (Throwable t) {
            // Wrap any validation failure (including Errors) with the deployment URL.
            throw new DeployException("Deployment " + url.toExternalForm() + " failed", t);
        }
    }
}
public class MatFileReader {

    /**
     * Reads MAT-file header.
     *
     * Modifies <code>buf</code> position. Determines the file's byte order
     * from the endian indicator, applies it to the buffer, and stores the
     * parsed header in {@code matFileHeader}.
     *
     * @param buf <code>ByteBuffer</code> positioned at the start of the header
     * @throws IOException if reading from buffer fails or if this is not a
     *         valid MAT-file
     */
    private void readHeader(ByteBuffer buf) throws IOException {
        // header values
        String description;
        int version;
        byte[] endianIndicator = new byte[2];
        // This part of the header is missing if the file isn't a regular mat file. So ignore.
        if (matType == MatFileType.Regular) {
            // descriptive text 116 bytes
            byte[] descriptionBuffer = new byte[116];
            buf.get(descriptionBuffer);
            description = zeroEndByteArrayToString(descriptionBuffer);
            if (!description.matches("MATLAB 5.0 MAT-file.*")) {
                throw new MatlabIOException("This is not a valid MATLAB 5.0 MAT-file.");
            }
            // subsyst data offset 8 bytes
            buf.position(buf.position() + 8);
        } else {
            description = "Simulink generated MATLAB 5.0 MAT-file"; // Default simulink description.
        }
        byte[] bversion = new byte[2]; // version 2 bytes
        buf.get(bversion);
        // endian indicator 2 bytes
        buf.get(endianIndicator);
        // program reading the MAT-file must perform byte swapping to interpret the data
        // in the MAT-file correctly
        if ((char) endianIndicator[0] == 'I' && (char) endianIndicator[1] == 'M') {
            // "IM" means the writer was little-endian, so the version bytes are swapped too.
            byteOrder = ByteOrder.LITTLE_ENDIAN;
            version = bversion[1] & 0xff | bversion[0] << 8;
        } else {
            byteOrder = ByteOrder.BIG_ENDIAN;
            version = bversion[0] & 0xff | bversion[1] << 8;
        }
        buf.order(byteOrder);
        matFileHeader = new MatFileHeader(description, version, endianIndicator, byteOrder);
        // After the header, the next read must be aligned. Thus force the alignment.
        // Only matters with reduced header data, but apply it regardless for safety.
        buf.position((buf.position() + 7) & 0xfffffff8);
    }
}
public class Async {

    /**
     * Convert a synchronous function call into an asynchronous function call
     * through an Observable.
     *
     * @param <R> the result type
     * @param func the function to convert
     * @return a function that returns an Observable that executes {@code func}
     *         and emits its returned value
     * @see <a href="https://github.com/ReactiveX/RxJava/wiki/Async-Operators">RxJava Wiki: toAsync()</a>
     */
    public static <R> FuncN<Observable<R>> toAsync(FuncN<? extends R> func) {
        // Delegates to the scheduler-aware overload with the computation scheduler.
        return toAsync(func, Schedulers.computation());
    }
}
public class ProcessedInput { /** * Returns parameter name by specifying it ' s position
* @ param position position of parameter
* @ return name of parameter , null if list of names is empty */
public String getParameterName ( Integer position ) { } } | String name = null ; if ( this . sqlParameterNames != null ) { name = this . sqlParameterNames . get ( position ) ; } return name ; |
public class ProtoLexer {

    /**
     * $ANTLR start "SEMICOLON"
     *
     * ANTLR-generated lexer rule: matches a single ';' character and emits it
     * as a SEMICOLON token on the default channel. Do not hand-edit; the
     * structure (including the empty finally) is generator boilerplate.
     */
    public final void mSEMICOLON() throws RecognitionException {
        try {
            int _type = SEMICOLON;
            int _channel = DEFAULT_TOKEN_CHANNEL;
            // com/dyuproject/protostuff/parser/ProtoLexer.g:67:5: ( ';' )
            // com/dyuproject/protostuff/parser/ProtoLexer.g:67:9: ';'
            {
                match(';');
            }
            state.type = _type;
            state.channel = _channel;
        } finally {
            // Generated code: no cleanup required for this rule.
        }
    }
}
public class CloudantDatabaseService {

    /**
     * Invoked when a cloudant Database is injected or looked up.
     *
     * Records the requesting application (when component metadata is
     * available) before delegating to the underlying cloudant service.
     *
     * @param info resource ref info, or null if direct lookup
     * @return instance of com.cloudant.client.api.Database
     */
    @Override
    public Object createResource(ResourceInfo info) throws Exception {
        // Track which application is using this resource, if we can tell.
        ComponentMetaData cData = ComponentMetaDataAccessorImpl.getComponentMetaDataAccessor().getComponentMetaData();
        if (cData != null)
            applications.add(cData.getJ2EEName().getApplication());
        // Direct lookups (info == null) use application auth and no login properties.
        return cloudantSvc.createResource((String) props.get("databaseName"),
                (Boolean) props.get("create"),
                info == null ? ResourceInfo.AUTH_APPLICATION : info.getAuth(),
                info == null ? null : info.getLoginPropertyList());
    }
}
public class ApiOvhDomain {

    /**
     * Get this object properties.
     *
     * REST: GET /domain/zone/{zoneName}/redirection/{id}
     *
     * @param zoneName [required] The internal name of your zone
     * @param id [required] Id of the object
     * @return the redirection's properties, deserialized from the API response
     * @throws IOException if the HTTP call or deserialization fails
     */
    public OvhRedirection zone_zoneName_redirection_id_GET(String zoneName, Long id) throws IOException {
        String qPath = "/domain/zone/{zoneName}/redirection/{id}";
        // Substitute the path parameters into the template.
        StringBuilder sb = path(qPath, zoneName, id);
        String resp = exec(qPath, "GET", sb.toString(), null);
        return convertTo(resp, OvhRedirection.class);
    }
}
public class DOTranslationUtility {

    /**
     * Certain serviceDeployment datastreams require special processing to
     * fix/complete URLs and do variable substitution (such as replacing
     * 'local.fedora.server' with fedora's baseURL).
     *
     * Only objects with the SERVICE_DEPLOYMENT_3_0 content model are touched,
     * and within those only the WSDL and SERVICE-PROFILE inline-XML
     * datastreams. A no-op when the translation context is AS_IS.
     *
     * @param obj the digital object whose datastreams may be rewritten in place
     * @param transContext translation context; AS_IS skips all normalization
     * @param characterEncoding encoding to re-serialize normalized XML with
     * @throws UnsupportedEncodingException if characterEncoding is unsupported
     */
    public void normalizeDatastreams(DigitalObject obj, int transContext,
            String characterEncoding) throws UnsupportedEncodingException {
        if (transContext == AS_IS) {
            return;
        }
        if (obj.hasContentModel(Models.SERVICE_DEPLOYMENT_3_0)) {
            Iterator<String> datastreams = obj.datastreamIdIterator();
            while (datastreams.hasNext()) {
                String dsid = datastreams.next();
                if (dsid.equals("WSDL") || dsid.equals("SERVICE-PROFILE")) {
                    for (Datastream d : obj.datastreams(dsid)) {
                        // Only inline XML can be normalized safely; skip anything else.
                        if (!(d instanceof DatastreamXMLMetadata)) {
                            logger.warn(obj.getPid() + " : Refusing to normalize URLs in datastream "
                                    + dsid + " because it is not inline XML");
                            continue;
                        }
                        DatastreamXMLMetadata xd = (DatastreamXMLMetadata) d;
                        if (logger.isDebugEnabled())
                            logger.debug("{} : normalising URLs in {}", obj.getPid(), dsid);
                        String origContent = new String(xd.xmlContent, "UTF-8");
                        String normal = normalizeInlineXML(origContent, transContext);
                        // Re-serialize when the content changed, or when the target
                        // encoding differs from the UTF-8 used to decode it.
                        if (!normal.equals(origContent) || !"UTF-8".equalsIgnoreCase(characterEncoding)) {
                            xd.xmlContent = normal.getBytes(characterEncoding);
                        }
                        // Keep the recorded size consistent with the (possibly new) bytes.
                        xd.DSSize = xd.xmlContent.length;
                    }
                }
            }
        }
    }
}
public class JdbcTable { /** * This is a special method - If the db doesn ' t allow multiple keys indexes ,
* see if this field is a first field on an index ( non - primary ) .
* @ return true if true . */
public boolean checkIndexField ( BaseField field ) { } } | for ( int iKeySeq = 0 ; iKeySeq < this . getRecord ( ) . getKeyAreaCount ( ) ; iKeySeq ++ ) { KeyArea keyArea = this . getRecord ( ) . getKeyArea ( iKeySeq ) ; if ( keyArea . getField ( 0 ) == field ) if ( field != this . getRecord ( ) . getCounterField ( ) ) return true ; } return false ; |
public class TelemetryUtils {

    /**
     * Do not call this method outside of activity!!!
     *
     * Returns the stored vendor id from shared preferences, generating and
     * persisting a fresh one when no application context is available or no
     * id has been stored yet.
     */
    public static String retrieveVendorId() {
        if (MapboxTelemetry.applicationContext == null) {
            // No context to read preferences from; fall back to generating a new id.
            return updateVendorId();
        }
        SharedPreferences sharedPreferences = obtainSharedPreferences(MapboxTelemetry.applicationContext);
        String mapboxVendorId = sharedPreferences.getString(MAPBOX_SHARED_PREFERENCE_KEY_VENDOR_ID, "");
        if (TelemetryUtils.isEmpty(mapboxVendorId)) {
            // First run (or cleared prefs): create and persist a new vendor id.
            mapboxVendorId = TelemetryUtils.updateVendorId();
        }
        return mapboxVendorId;
    }
}
public class GobblinYarnAppLauncher {

    /**
     * Stop this {@link GobblinYarnAppLauncher} instance.
     *
     * Idempotent: returns immediately if already stopped. Shutdown order —
     * notify the running application, stop managed services, stop the status
     * monitor and YARN client, then (always) clean the app working directory
     * and close held resources.
     *
     * @throws IOException if this {@link GobblinYarnAppLauncher} instance
     *         fails to clean up its working directory
     * @throws TimeoutException if stopping managed services times out
     */
    public synchronized void stop() throws IOException, TimeoutException {
        if (this.stopped) {
            return;
        }
        LOGGER.info("Stopping the " + GobblinYarnAppLauncher.class.getSimpleName());
        try {
            if (this.applicationId.isPresent() && !this.applicationCompleted) {
                // Only send the shutdown message if the application has been successfully submitted and is still running
                sendShutdownRequest();
            }
            if (this.serviceManager.isPresent()) {
                this.serviceManager.get().stopAsync().awaitStopped(5, TimeUnit.MINUTES);
            }
            ExecutorsUtils.shutdownExecutorService(this.applicationStatusMonitor, Optional.of(LOGGER), 5, TimeUnit.MINUTES);
            stopYarnClient();
            disconnectHelixManager();
        } finally {
            // Cleanup runs regardless of shutdown outcome; the nested finally
            // guarantees the closer is closed even if cleanup itself fails.
            try {
                if (this.applicationId.isPresent()) {
                    cleanUpAppWorkDirectory(this.applicationId.get());
                }
            } finally {
                this.closer.close();
            }
        }
        this.stopped = true;
    }
}
public class CmsJspNavElement {

    /**
     * Helper to get locale specific properties.
     *
     * Lazily initializes a lazy map that resolves property values for this
     * element's locale on demand; subsequent calls return the cached map.
     *
     * @return the locale specific properties map
     */
    private Map<String, String> getLocaleProperties() {
        if (m_localeProperties == null) {
            m_localeProperties = CmsCollectionsGenericWrapper.createLazyMap(
                    new CmsProperty.CmsPropertyLocaleTransformer(m_properties, m_locale));
        }
        return m_localeProperties;
    }
}
public class Providers { /** * Provide a Provider from the resource found in class loader with the provided encoding . < br / > As resource is
* accessed through a class loader , a leading " / " is not allowed in pathToResource */
public static Provider resourceProvider ( ClassLoader classLoader , String pathToResource , Charset encoding ) throws IOException { } } | InputStream resourceAsStream = classLoader . getResourceAsStream ( pathToResource ) ; if ( resourceAsStream == null ) { throw new IOException ( "Cannot find " + pathToResource ) ; } return provider ( resourceAsStream , encoding ) ; |
public class WCOutputStream {

    /**
     * Writes the character followed by CRLF to the underlying output stream.
     *
     * NOTE(review): the (byte) cast truncates the char to its low 8 bits;
     * appears to assume single-byte characters — confirm against callers.
     *
     * @see javax.servlet.ServletOutputStream#println(char)
     */
    public void println(char c) throws IOException {
        // Reuse the single-byte scratch buffer rather than allocating per call.
        this.singleByte[0] = (byte) c;
        this.output.write(this.singleByte, 0, 1);
        this.output.write(CRLF, 0, 2);
    }
}
public class MarketApi {

    /**
     * List historical orders from a corporation (asynchronously). Lists
     * cancelled and expired market orders placed on behalf of a corporation
     * up to 90 days in the past. Cached for up to 3600 seconds. Requires one
     * of the following EVE corporation roles: Accountant, Trader.
     * SSO Scope: esi-markets.read_corporation_orders.v1
     *
     * @param corporationId An EVE corporation ID (required)
     * @param datasource The server name you would like data from (optional,
     *        default to tranquility)
     * @param ifNoneMatch ETag from a previous request. A 304 will be returned
     *        if this matches the current ETag (optional)
     * @param page Which page of results to return (optional, default to 1)
     * @param token Access token to use if unable to set a header (optional)
     * @param callback The callback to be executed when the API call finishes
     * @return The request call
     * @throws ApiException If fail to process the API call, e.g. serializing
     *         the request body object
     */
    public com.squareup.okhttp.Call getCorporationsCorporationIdOrdersHistoryAsync(
            Integer corporationId, String datasource, String ifNoneMatch, Integer page,
            String token, final ApiCallback<List<CorporationOrdersHistoryResponse>> callback)
            throws ApiException {
        // Validate parameters and build the HTTP call, then dispatch it
        // asynchronously; the callback receives the typed response.
        com.squareup.okhttp.Call call = getCorporationsCorporationIdOrdersHistoryValidateBeforeCall(
                corporationId, datasource, ifNoneMatch, page, token, callback);
        Type localVarReturnType = new TypeToken<List<CorporationOrdersHistoryResponse>>() {
        }.getType();
        apiClient.executeAsync(call, localVarReturnType, callback);
        return call;
    }
}
public class FindNullDeref {
    /**
     * We have a method invocation in which a possibly or definitely null
     * parameter is passed. Check it against the library of nonnull annotations
     * and report an NP_NONNULL_PARAM_VIOLATION bug for each violating argument.
     *
     * @param location the program point of the invocation
     * @param cpg constant pool of the analyzed class
     * @param typeDataflow type dataflow results (NOTE(review): currently unused in this body)
     * @param invokeInstruction the invocation being checked
     * @param nullArgSet bit set of argument positions that may be null
     * @param definitelyNullArgSet bit set of argument positions that are definitely null
     */
    private void checkNonNullParam(Location location, ConstantPoolGen cpg, TypeDataflow typeDataflow, InvokeInstruction invokeInstruction, BitSet nullArgSet, BitSet definitelyNullArgSet) {
        // Suppress the warning when the call sits inside an explicit catch of NPE.
        if (inExplicitCatchNullBlock(location)) {
            return;
        }
        boolean caught = inIndirectCatchNullBlock(location);
        if (caught && skipIfInsideCatchNull()) {
            return;
        }
        // invokedynamic call sites have no resolvable target method to annotate.
        if (invokeInstruction instanceof INVOKEDYNAMIC) {
            return;
        }
        XMethod m = XFactory.createXMethod(invokeInstruction, cpg);
        INullnessAnnotationDatabase db = AnalysisContext.currentAnalysisContext().getNullnessAnnotationDatabase();
        SignatureParser sigParser = new SignatureParser(invokeInstruction.getSignature(cpg));
        // Walk every argument position flagged as (possibly) null.
        for (int i = nullArgSet.nextSetBit(0); i >= 0; i = nullArgSet.nextSetBit(i + 1)) {
            if (db.parameterMustBeNonNull(m, i)) {
                boolean definitelyNull = definitelyNullArgSet.get(i);
                if (DEBUG_NULLARG) {
                    System.out.println("Checking " + m);
                    System.out.println("QQQ2: " + i + " -- " + i + " is null");
                    System.out.println("QQQ nullArgSet: " + nullArgSet);
                    System.out.println("QQQ dnullArgSet: " + definitelyNullArgSet);
                }
                // Best-effort: attach the source variable that carries the null value.
                BugAnnotation variableAnnotation = null;
                try {
                    ValueNumberFrame vnaFrame = classContext.getValueNumberDataflow(method).getFactAtLocation(location);
                    ValueNumber valueNumber = vnaFrame.getArgument(invokeInstruction, cpg, i, sigParser);
                    variableAnnotation = ValueNumberSourceInfo.findAnnotationFromValueNumber(method, location, valueNumber, vnaFrame, "VALUE_OF");
                } catch (DataflowAnalysisException e) {
                    AnalysisContext.logError("error", e);
                } catch (CFGBuilderException e) {
                    AnalysisContext.logError("error", e);
                }
                // Definite nulls are high priority; demote when guarded by an
                // indirect NPE catch or when the callee is private.
                int priority = definitelyNull ? HIGH_PRIORITY : NORMAL_PRIORITY;
                if (caught) {
                    priority++;
                }
                if (m.isPrivate() && priority == HIGH_PRIORITY) {
                    priority = NORMAL_PRIORITY;
                }
                String description = definitelyNull ? "INT_NULL_ARG" : "INT_MAYBE_NULL_ARG";
                WarningPropertySet<WarningProperty> propertySet = new WarningPropertySet<>();
                Set<Location> derefLocationSet = Collections.singleton(location);
                addPropertiesForDereferenceLocations(propertySet, derefLocationSet, false);
                // Note: returning (not continuing) skips the remaining flagged
                // arguments once a duplicate is detected at this bytecode offset.
                boolean duplicated = isDuplicated(propertySet, location.getHandle().getPosition(), false);
                if (duplicated) {
                    return;
                }
                BugInstance warning = new BugInstance(this, "NP_NONNULL_PARAM_VIOLATION", priority)
                        .addClassAndMethod(classContext.getJavaClass(), method)
                        .addMethod(m).describe(MethodAnnotation.METHOD_CALLED)
                        .addParameterAnnotation(i, description)
                        .addOptionalAnnotation(variableAnnotation)
                        .addSourceLine(classContext, method, location);
                propertySet.decorateBugInstance(warning);
                bugReporter.reportBug(warning);
            }
        }
    }
}
public class Util { /** * Transforms a stream into a string .
* @ param is
* the stream to be transformed
* @ param charsetName
* encoding of the file
* @ return the string containing the content of the stream */
public static String streamToString ( InputStream is , String charsetName ) { } } | try { Reader r = null ; try { r = new BufferedReader ( new InputStreamReader ( is , charsetName ) ) ; return readerToString ( r ) ; } finally { if ( r != null ) { try { r . close ( ) ; } catch ( IOException e ) { } } } } catch ( Exception e ) { throw new RuntimeException ( e ) ; } |
public class ManifestFileProcessor { /** * This API for install used only . */
public Map < String , ProvisioningFeatureDefinition > getCoreFeatureDefinitionsExceptPlatform ( ) { } } | Map < String , ProvisioningFeatureDefinition > features = new TreeMap < String , ProvisioningFeatureDefinition > ( ) ; File featureDir = getCoreFeatureDir ( ) ; // the feature directory may not exist if the packaged server had no features installed when minified
if ( ! featureDir . isDirectory ( ) && ! featureDir . mkdir ( ) ) { throw new FeatureToolException ( "Unable to find or create feature directory: " + featureDir , MessageFormat . format ( NLS . messages . getString ( "tool.feature.dir.not.found" ) , featureDir ) , null , ReturnCode . MISSING_CONTENT ) ; } File [ ] manifestFiles = featureDir . listFiles ( MFFilter ) ; if ( manifestFiles != null ) { for ( File file : manifestFiles ) { try { ProvisioningFeatureDefinition fd = new SubsystemFeatureDefinitionImpl ( ExtensionConstants . CORE_EXTENSION , file ) ; if ( fd . isSupportedFeatureVersion ( ) ) { // using symbolic name because gets compared to FeatureResource symbolic name
features . put ( fd . getSymbolicName ( ) , fd ) ; } } catch ( IOException e ) { // TODO : PROPER NLS MESSAGE
throw new FeatureToolException ( "Unable to read core feature manifest: " + file , ( String ) null , e , ReturnCode . BAD_FEATURE_DEFINITION ) ; } } } return features ; |
public class SequenceGibbsSampler { /** * Samples the complete sequence once in the forward direction
* Destructively modifies the sequence in place .
* @ param sequence the sequence to start with . */
public void sampleSequenceForward ( SequenceModel model , int [ ] sequence , double temperature ) { } } | // System . err . println ( " Sampling forward " ) ;
for ( int pos = 0 ; pos < sequence . length ; pos ++ ) { samplePosition ( model , sequence , pos , temperature ) ; } |
public class QueryParserBase { /** * Append field list to { @ link SolrQuery }
* @ param solrQuery
* @ param fields */
protected void appendProjectionOnFields ( SolrQuery solrQuery , List < Field > fields , @ Nullable Class < ? > domainType ) { } } | if ( CollectionUtils . isEmpty ( fields ) ) { return ; } List < String > solrReadableFields = new ArrayList < > ( ) ; for ( Field field : fields ) { if ( field instanceof CalculatedField ) { solrReadableFields . add ( createCalculatedFieldFragment ( ( CalculatedField ) field , domainType ) ) ; } else { solrReadableFields . add ( getMappedFieldName ( field , domainType ) ) ; } } solrQuery . setParam ( CommonParams . FL , StringUtils . join ( solrReadableFields , "," ) ) ; |
public class ExpressionDecomposer {
    /**
     * Finds the statement containing {@code subExpression}.
     *
     * <p>If {@code subExpression} is not contained by a statement where inlining is known to be
     * possible, {@code null} is returned. For example, the condition expression of a WHILE loop.
     */
    @Nullable
    private static Node findExpressionRoot(Node subExpression) {
        // Walk up the ancestor chain; 'child' always tracks the node one level
        // below 'current' so we can tell which child of 'current' we came from.
        Node child = subExpression;
        for (Node current : child.getAncestors()) {
            Node parent = current.getParent();
            switch (current.getToken()) {
                // Supported expression roots:
                // SWITCH and IF can have multiple children, but the CASE, DEFAULT,
                // or BLOCK will be encountered first for any of the children other
                // than the condition.
                case EXPR_RESULT:
                case IF:
                case SWITCH:
                case RETURN:
                case THROW:
                    Preconditions.checkState(child.isFirstChildOf(current));
                    return current;
                case VAR:
                    // Normalization will remove LABELs from VARs.
                case LET:
                case CONST:
                    if (NodeUtil.isAnyFor(parent)) {
                        break; // Name declarations may not be roots if they're for-loop initializers.
                    }
                    return current;
                // Any of these indicate an unsupported expression:
                case FOR:
                    if (child.isFirstChildOf(current)) {
                        // Only the initializer of a for-loop could possibly be decomposed since the other
                        // statements need to execute each iteration.
                        return current;
                    }
                    // fall through
                case FOR_IN:
                case FOR_OF:
                case FOR_AWAIT_OF:
                case DO:
                case WHILE:
                case SCRIPT:
                case BLOCK:
                case LABEL:
                case CASE:
                case DEFAULT_CASE:
                case PARAM_LIST:
                    return null;
                default:
                    break;
            }
            child = current;
        }
        // Reaching the top of the AST without finding a root is a logic error.
        throw new IllegalStateException("Unexpected AST structure.");
    }
}
public class RTMPConnection { /** * Return channel id for given stream id .
* @ param streamId
* Stream id
* @ return ID of channel that belongs to the stream */
public int getChannelIdForStreamId ( Number streamId ) { } } | int channelId = ( int ) ( streamId . doubleValue ( ) * 5 ) - 1 ; if ( log . isTraceEnabled ( ) ) { log . trace ( "Channel id: {} requested for stream id: {}" , channelId , streamId ) ; } return channelId ; |
public class ConnecClient { /** * Update an entity remotely
* @ param entity
* name
* @ param groupId
* customer group id
* @ param entityId
* id of the entity to retrieve
* @ param hash
* entity attributes to update
* @ return updated entity
* @ throws AuthenticationException
* @ throws ApiException
* @ throws InvalidRequestException */
public Map < String , Object > update ( String entityName , String groupId , String entityId , Map < String , Object > hash ) throws AuthenticationException , ApiException , InvalidRequestException { } } | return update ( entityName , groupId , entityId , hash , getAuthenticatedClient ( ) ) ; |
public class XmlProcessor { /** * Get from pool , or create one without locking , if needed . */
private DocumentBuilder getDocumentBuilderFromPool ( ) throws ParserConfigurationException { } } | DocumentBuilder builder = documentBuilderPool . pollFirst ( ) ; if ( builder == null ) { builder = getDomFactory ( ) . newDocumentBuilder ( ) ; } builder . setErrorHandler ( errorHandler ) ; return builder ; |
public class AtomTypeAwareSaturationChecker { /** * Check if the bond order can be increased . This method assumes that the
* bond is between only two atoms .
* @ param bond The bond to check
* @ param atomContainer The { @ link IAtomContainer } that the bond belongs to
* @ return True if it is possibly to increase the bond order
* @ throws CDKException */
public boolean bondOrderCanBeIncreased ( IBond bond , IAtomContainer atomContainer ) throws CDKException { } } | boolean atom0isUnsaturated = false , atom1isUnsaturated = false ; double sum ; if ( bond . getBegin ( ) . getBondOrderSum ( ) == null ) { sum = getAtomBondordersum ( bond . getEnd ( ) , atomContainer ) ; } else sum = bond . getBegin ( ) . getBondOrderSum ( ) ; if ( bondsUsed ( bond . getBegin ( ) , atomContainer ) < sum ) atom0isUnsaturated = true ; if ( bond . getEnd ( ) . getBondOrderSum ( ) == null ) { sum = getAtomBondordersum ( bond . getEnd ( ) , atomContainer ) ; } else sum = bond . getEnd ( ) . getBondOrderSum ( ) ; if ( bondsUsed ( bond . getEnd ( ) , atomContainer ) < sum ) atom1isUnsaturated = true ; if ( atom0isUnsaturated == atom1isUnsaturated ) return atom0isUnsaturated ; else { /* * If one of the atoms is saturated and the other isn ' t , what do we
* do then ? Look at the bonds on each side and decide from that . . . */
int myIndex = atomContainer . indexOf ( bond ) ; // If it ' s the first bond , then just move on .
if ( myIndex == 0 ) return false ; /* * If the previous bond is the reason it ' s no problem , so just move
* on . . . */
/* * TODO instead check if the atom that are in both bonds are
* saturated . . . ? */
if ( atomContainer . getBond ( myIndex - 1 ) . getOrder ( ) == IBond . Order . DOUBLE ) return false ; /* * The only reason for trouble should now be that the next bond make
* one of the atoms saturated , so lets throw an exception and
* reveres until we can place a double bond and set it as single and
* continue */
if ( isConnected ( atomContainer . getBond ( myIndex ) , atomContainer . getBond ( 0 ) ) ) throw new CantDecideBondOrderException ( "Can't decide bond order of this bond" ) ; else { return false ; } } |
public class SharedPreferenceUtils { /** * Extract number from string , failsafe . If the string is not a proper number it will always return 0;
* @ param string
* : String that should be converted into a number
* @ return : 0 if conversion to number is failed anyhow , otherwise converted number is returned */
public static int getNumber ( CharSequence string ) { } } | int number = 0 ; if ( ! isEmptyString ( string ) ) { if ( TextUtils . isDigitsOnly ( string ) ) { number = Integer . parseInt ( string . toString ( ) ) ; } } return number ; |
public class Logger {
    /**
     * Reset the Logger, i.e. remove all appenders and set the log level to the
     * default level.
     */
    // NOTE(review): this is an instance-level synchronized method guarding
    // static state; concurrent resets through different Logger instances would
    // not be mutually excluded — confirm whether static synchronization was intended.
    public synchronized void resetLogger() {
        // Drop all registered appenders.
        Logger.appenderList.clear();
        // Stop and zero the shared stopwatch so timing restarts cleanly.
        Logger.stopWatch.stop();
        Logger.stopWatch.reset();
        // Next log call behaves like the very first one again.
        Logger.firstLogEvent = true;
    }
}
public class QualityOfServiceBlockingQueue {
    /**
     * Drains up to {@code maxElements} elements into the given collection,
     * choosing elements in quality-of-service order via getNextElementKey.
     *
     * @see java.util.concurrent.BlockingQueue#drainTo(java.util.Collection, int)
     */
    @Override
    public final int drainTo(Collection<? super T> c, int maxElements) {
        // Short circuit using read-lock
        if (this.isEmpty()) {
            return 0;
        }
        this.writeLock.lock();
        try {
            int count = 0;
            // Pull one element per iteration from whichever keyed queue the
            // QoS policy selects next, until we hit the size or caller limit.
            while (count < this.size && count < maxElements) {
                final K key = this.getNextElementKey();
                final Queue<T> queue = this.keyedQueues.get(key);
                if (queue == null || queue.isEmpty()) {
                    // getNextElementKey must only return keys with available
                    // elements; a miss here indicates internal corruption.
                    throw new IllegalStateException("getNextElementKey returned key='" + key + "' but there are no elements available for the key. This violates the contract specified for getNextElementKey: " + this.toString());
                }
                final T e = queue.poll();
                c.add(e);
                count++;
            }
            // Update the aggregate size and wake one blocked producer if we
            // actually freed capacity.
            this.size -= count;
            if (count > 0) {
                this.notFull.signal();
            }
            return count;
        } finally {
            this.writeLock.unlock();
        }
    }
}
public class Http2ClientChannel { /** * Gets the in - flight message associated with the a particular stream id .
* @ param streamId stream id
* @ return in - flight message associated with the a particular stream id */
public OutboundMsgHolder getInFlightMessage ( int streamId ) { } } | if ( LOG . isDebugEnabled ( ) ) { LOG . debug ( "Getting in flight message for stream id: {} from channel: {}" , streamId , this ) ; } return inFlightMessages . get ( streamId ) ; |
import java.io.*;
import java.lang.*;
import java.util.*;
import java.math.*;

class SumTwoDigitElements {
    /**
     * Returns the sum of the numbers with at most two digits (-99..99) among
     * the first 'count' elements of a non-empty list 'numbers'.
     *
     * Args:
     *   numbers: non-empty list of integers (1 <= numbers.size() <= 100)
     *   count: how many leading elements to consider (1 <= count <= numbers.size())
     * Returns:
     *   the total of two-digit-or-smaller values among the first 'count' elements.
     * Example:
     *   numbers = [111, 21, 3, 4000, 5, 6, 7, 8, 9], count = 4 -> 24 (21 + 3)
     */
    public static Integer sumTwoDigitElements(List<Integer> numbers, Integer count) {
        int total = 0;
        int index = 0;
        while (index < count) {
            final int value = numbers.get(index);
            // "Two digits" means the value fits in the range -99..99 inclusive.
            final boolean atMostTwoDigits = value >= -99 && value <= 99;
            if (atMostTwoDigits) {
                total += value;
            }
            index++;
        }
        return total;
    }
}
public class ProjectModel { /** * Check if all classes belong to a module */
public void checkAllClassesInModule ( List < String > errors ) { } } | for ( ClassModel clazz : classes . values ( ) ) { if ( clazz . getModule ( ) == null ) { errors . add ( "Class " + clazz + " is in no module" ) ; } } |
public class NlsRuntimeException { /** * @ see # createCopy ( ExceptionTruncation )
* @ param truncation the { @ link ExceptionTruncation } settings .
* @ return the ( truncated ) copy .
* @ deprecated will be removed */
@ Deprecated protected NlsRuntimeException createCopyViaClone ( ExceptionTruncation truncation ) { } } | try { NlsRuntimeException copy = ( NlsRuntimeException ) clone ( ) ; ThrowableHelper . removeDetails ( copy , truncation ) ; return copy ; } catch ( CloneNotSupportedException e ) { throw new IllegalStateException ( e ) ; } |
public class AddAdCustomizer { /** * Creates expanded text ads that use ad customizations for the specified ad group IDs . */
private static void createAdsWithCustomizations ( AdWordsServicesInterface adWordsServices , AdWordsSession session , List < Long > adGroupIds , String feedName ) throws RemoteException { } } | // Get the AdGroupAdService .
AdGroupAdServiceInterface adGroupAdService = adWordsServices . get ( session , AdGroupAdServiceInterface . class ) ; ExpandedTextAd textAd = new ExpandedTextAd ( ) ; textAd . setHeadlinePart1 ( String . format ( "Luxury Cruise to {=%s.Name}" , feedName ) ) ; textAd . setHeadlinePart2 ( String . format ( "Only {=%s.Price}" , feedName ) ) ; textAd . setDescription ( String . format ( "Offer ends in {=countdown(%s.Date)}!" , feedName ) ) ; textAd . setFinalUrls ( new String [ ] { "http://www.example.com" } ) ; // We add the same ad to both ad groups . When they serve , they will show different values , since
// they match different feed items .
List < AdGroupAdOperation > adGroupAdOperations = new ArrayList < > ( ) ; for ( Long adGroupId : adGroupIds ) { AdGroupAd adGroupAd = new AdGroupAd ( ) ; adGroupAd . setAdGroupId ( adGroupId ) ; adGroupAd . setAd ( textAd ) ; AdGroupAdOperation adGroupAdOperation = new AdGroupAdOperation ( ) ; adGroupAdOperation . setOperand ( adGroupAd ) ; adGroupAdOperation . setOperator ( Operator . ADD ) ; adGroupAdOperations . add ( adGroupAdOperation ) ; } AdGroupAdReturnValue adGroupAdReturnValue = adGroupAdService . mutate ( adGroupAdOperations . toArray ( new AdGroupAdOperation [ adGroupAdOperations . size ( ) ] ) ) ; for ( AdGroupAd addedAd : adGroupAdReturnValue . getValue ( ) ) { System . out . printf ( "Created an ad with ID %d, type '%s' and status '%s'.%n" , addedAd . getAd ( ) . getId ( ) , addedAd . getAd ( ) . getAdType ( ) , addedAd . getStatus ( ) ) ; } |
public class Config { /** * Creates a fixed configuration for the supplied { @ link AbstractConfiguration } objects . Only key / value
* pairs from these objects will be present in the final configuration .
* There is no implicit override from system properties . */
public static Config getFixedConfig ( @ Nullable final AbstractConfiguration ... configs ) { } } | final CombinedConfiguration cc = new CombinedConfiguration ( new OverrideCombiner ( ) ) ; if ( configs != null ) { for ( final AbstractConfiguration config : configs ) { cc . addConfiguration ( config ) ; } } return new Config ( cc ) ; |
public class BinarySerde { /** * This method returns shape databuffer from saved earlier file
* @ param readFrom
* @ return
* @ throws IOException */
public static DataBuffer readShapeFromDisk ( File readFrom ) throws IOException { } } | try ( FileInputStream os = new FileInputStream ( readFrom ) ) { FileChannel channel = os . getChannel ( ) ; // we read shapeinfo up to max _ rank value , which is 32
int len = ( int ) Math . min ( ( 32 * 2 + 3 ) * 8 , readFrom . length ( ) ) ; ByteBuffer buffer = ByteBuffer . allocateDirect ( len ) ; channel . read ( buffer ) ; ByteBuffer byteBuffer = buffer == null ? ByteBuffer . allocateDirect ( buffer . array ( ) . length ) . put ( buffer . array ( ) ) . order ( ByteOrder . nativeOrder ( ) ) : buffer . order ( ByteOrder . nativeOrder ( ) ) ; buffer . position ( 0 ) ; int rank = byteBuffer . getInt ( ) ; val result = new long [ Shape . shapeInfoLength ( rank ) ] ; // filling DataBuffer with shape info
result [ 0 ] = rank ; // skipping two next values ( dtype and rank again )
// please , that this time rank has dtype of LONG , so takes 8 bytes .
byteBuffer . position ( 16 ) ; // filling shape information
for ( int e = 1 ; e < Shape . shapeInfoLength ( rank ) ; e ++ ) { result [ e ] = byteBuffer . getLong ( ) ; } // creating nd4j databuffer now
DataBuffer dataBuffer = Nd4j . getDataBufferFactory ( ) . createLong ( result ) ; return dataBuffer ; } |
public class ListUtils { /** * Obtains a random sample without replacement from a source list and places
* it in the destination list . This is done without modifying the source list .
* @ param < T > the list content type involved
* @ param source the source of values to randomly sample from
* @ param dest the list to store the random samples in . The list does not
* need to be empty for the sampling to work correctly
* @ param samples the number of samples to select from the source
* @ throws IllegalArgumentException if the sample size is not positive or l
* arger than the source population . */
public static < T > void randomSample ( List < T > source , List < T > dest , int samples ) { } } | randomSample ( source , dest , samples , RandomUtil . getRandom ( ) ) ; |
public class Distribution { /** * This method returns a double array containing the values of random samples from this distribution .
* @ param numSamples the number of random samples to take
* @ param rand the source of randomness
* @ return a vector of the random sample values */
public DenseVector sampleVec ( int numSamples , Random rand ) { } } | return DenseVector . toDenseVec ( sample ( numSamples , rand ) ) ; |
public class Scanners { /** * A scanner that scans greedily for 1 or more characters that satisfies the given CharPredicate .
* @ param predicate the predicate object .
* @ return the Parser object . */
public static Parser < Void > many1 ( CharPredicate predicate ) { } } | return Patterns . many1 ( predicate ) . toScanner ( predicate + "+" ) ; |
public class ImageIOHelper { /** * Creates a list of TIFF image files from an image file . It basically
* converts images of other formats to TIFF format , or a multi - page TIFF
* image to multiple TIFF image files .
* @ param imageFile input image file
* @ param index an index of the page ; - 1 means all pages , as in a multi - page
* TIFF image
* @ return a list of TIFF image files
* @ throws IOException */
public static List < File > createTiffFiles ( File imageFile , int index ) throws IOException { } } | return createTiffFiles ( imageFile , index , false ) ; |
public class CClassLoader {
    /**
     * Destroy the loader tree: detach everything the JVM may still reference
     * through this classloader (shutdown hooks, look-and-feel state, JDBC
     * drivers, thread locals, protection domains) so the tree can be GC'd.
     * Relies heavily on reflection into JDK internals; every step is
     * best-effort and failures are deliberately ignored.
     */
    public static final void destroy() {
        // Nothing to do if the tree was never created or already destroyed.
        if (CClassLoader.rootLoader == null) {
            return;
        }
        System.out.println("Destroying YAHP ClassLoader Tree");
        CClassLoader.urlLoader = null;
        // Remove JVM shutdown hooks whose classes were loaded by this tree
        // (java.lang.Shutdown internals — may fail on non-Sun JDKs).
        try {
            Field f = Class.forName("java.lang.Shutdown").getDeclaredField("hooks");
            f.setAccessible(true);
            ArrayList l = (ArrayList) f.get(null);
            for (Iterator it = l.iterator(); it.hasNext();) {
                Object o = it.next();
                if ((o != null) && (o.getClass().getClassLoader() != null) && (o.getClass().getClassLoader().getClass() == CClassLoader.class)) {
                    it.remove();
                }
            }
        } catch (Throwable ignore) {
        }
        // Same cleanup for application-level shutdown hooks (key and value threads).
        try {
            Field f = Class.forName("java.lang.ApplicationShutdownHooks").getDeclaredField("hooks");
            f.setAccessible(true);
            IdentityHashMap l = (IdentityHashMap) f.get(null);
            for (Iterator it = l.entrySet().iterator(); it.hasNext();) {
                Entry e = (Entry) it.next();
                Thread o = (Thread) e.getKey();
                if ((o != null) && (o.getClass().getClassLoader() != null) && (o.getClass().getClassLoader().getClass() == CClassLoader.class)) {
                    it.remove();
                    continue;
                }
                o = (Thread) e.getValue();
                if ((o != null) && (o.getClass().getClassLoader() != null) && (o.getClass().getClassLoader().getClass() == CClassLoader.class)) {
                    it.remove();
                }
            }
        } catch (Throwable ignore) {
        }
        // Drop Swing look-and-feel state that references classes from this tree.
        try {
            if ((UIManager.getLookAndFeel() != null) && (UIManager.getLookAndFeel().getClass().getClassLoader() != null) && (UIManager.getLookAndFeel().getClass().getClassLoader().getClass() == CClassLoader.class)) {
                UIManager.setLookAndFeel((LookAndFeel) null);
            }
            Field f = UIManager.class.getDeclaredField("currentLAFState");
            f.setAccessible(true);
            Object lafstate = f.get(null);
            if (lafstate != null) {
                Field fmultiUIDefaults = lafstate.getClass().getDeclaredField("multiUIDefaults");
                fmultiUIDefaults.setAccessible(true);
                Object multiUIDefaults = fmultiUIDefaults.get(lafstate);
                Method clear = multiUIDefaults.getClass().getDeclaredMethod("clear", (Class[]) null);
                clear.setAccessible(true);
                clear.invoke(multiUIDefaults, (Object[]) null);
                Field tbl = lafstate.getClass().getDeclaredField("tables");
                tbl.setAccessible(true);
                Hashtable[] tables = (Hashtable[]) tbl.get(lafstate);
                if (tables != null) {
                    for (int i = 0; i < tables.length; i++) {
                        Hashtable element = tables[i];
                        if (element != null) {
                            element.clear();
                        }
                    }
                }
            }
        } catch (Throwable ignore) {
        }
        // Replace the UIDefaults "ClassLoader" entry with our parent loader.
        try {
            Hashtable tb = UIManager.getDefaults();
            Object cl = tb.get("ClassLoader");
            if (cl.getClass() == CClassLoader.class) {
                tb.put("ClassLoader", CClassLoader.rootLoader.getParent());
            }
        } catch (Throwable ignore) {
        }
        // Look up commons-logging's LogFactory.release(ClassLoader) so the
        // recursive _destroy can release per-loader logging caches.
        Method logFactoryRelease = null;
        try {
            logFactoryRelease = CClassLoader.rootLoader.loadClass("org.apache.commons.logging.LogFactory").getMethod("release", new Class[] { ClassLoader.class });
        } catch (final Throwable ignore) {
        }
        CClassLoader.rootLoader._destroy(logFactoryRelease);
        CClassLoader.mandatoryLoadersMap.clear();
        CClassLoader.rootLoader = null;
        // Deregister any SQL driver loaded by this tree.
        try {
            final List deregisterList = new ArrayList();
            for (final Enumeration it = DriverManager.getDrivers(); it.hasMoreElements();) {
                final Driver d = (Driver) it.nextElement();
                if ((d != null) && (d.getClass().getClassLoader() != null) && (d.getClass().getClassLoader().getClass() == CClassLoader.class)) {
                    deregisterList.add(d);
                }
            }
            for (int i = 0; i < deregisterList.size(); i++) {
                final Driver driver = (Driver) deregisterList.get(i);
                DriverManager.deregisterDriver(driver);
            }
        } catch (final Throwable ignore) {
        }
        // Stop dangling threads created with this classloader.
        // NOTE(review): relies on internal ThreadGroup/Thread field layout;
        // tested only on the Sun JDK.
        ThreadGroup tg = Thread.currentThread().getThreadGroup();
        while ((tg != null) && (tg.getParent() != null)) {
            tg = tg.getParent();
        }
        List ltg = new ArrayList();
        ltg.add(tg);
        CClassLoader.getThreadGroups(tg, ltg);
        for (int ii = 0; ii < ltg.size(); ii++) {
            try {
                final ThreadGroup g = (ThreadGroup) ltg.get(ii);
                final Field fthreads = ThreadGroup.class.getDeclaredField("threads");
                fthreads.setAccessible(true);
                final List toStop = new ArrayList();
                Object threads[] = null;
                if (fthreads.getType() == Vector.class) {
                    // in GNU Classpath the field is a Vector
                    threads = ((Vector) fthreads.get(g)).toArray();
                } else {
                    // Sun JDK: plain array
                    threads = (Object[]) fthreads.get(g);
                }
                for (int i = 0; i < threads.length; i++) {
                    if (threads[i] == null) {
                        continue;
                    }
                    // Detach context classloaders pointing into this tree.
                    if ((threads[i] != null) && (((Thread) threads[i]).getContextClassLoader() != null) && (((Thread) threads[i]).getContextClassLoader().getClass() == CClassLoader.class)) {
                        ((Thread) threads[i]).setContextClassLoader(null);
                    }
                    // Threads whose own class came from this tree must be stopped.
                    if ((threads[i] != null) && (threads[i].getClass().getClassLoader() != null) && (threads[i].getClass().getClassLoader().getClass() == CClassLoader.class)) {
                        toStop.add((Thread) threads[i]);
                    }
                    // Null out ThreadLocal entries whose values were loaded by
                    // this classloader tree (and scrub collections they hold).
                    try {
                        final Field fthreadLocals = Thread.class.getDeclaredField("threadLocals");
                        fthreadLocals.setAccessible(true);
                        final Object threadLocals = fthreadLocals.get(threads[i]);
                        if (threadLocals != null) {
                            final Field ftable = threadLocals.getClass().getDeclaredField("table");
                            ftable.setAccessible(true);
                            final Object table[] = (Object[]) ftable.get(threadLocals);
                            for (int kk = 0; kk < table.length; kk++) {
                                final Object element = table[kk];
                                if (element != null) {
                                    final Field fvalue = element.getClass().getDeclaredField("value");
                                    fvalue.setAccessible(true);
                                    final Object value = fvalue.get(element);
                                    if ((value != null) && (value.getClass().getClassLoader() != null) && (value.getClass().getClassLoader().getClass() == CClassLoader.class)) {
                                        fvalue.set(element, null);
                                    }
                                    if (value instanceof Map) {
                                        clearMap((Map) value);
                                    } else if (value instanceof List) {
                                        clearList((List) value);
                                    } else if (value instanceof Set) {
                                        clearSet((Set) value);
                                    } else if (value instanceof Object[]) {
                                        clearArray((Object[]) value);
                                    }
                                    fvalue.setAccessible(false);
                                }
                            }
                            ftable.setAccessible(false);
                        }
                        fthreadLocals.setAccessible(false);
                    } catch (final Throwable ignore) {
                        ignore.printStackTrace();
                    }
                    // Same scrub for inheritable thread locals.
                    try {
                        final Field fthreadLocals = Thread.class.getDeclaredField("inheritableThreadLocals");
                        fthreadLocals.setAccessible(true);
                        final Object threadLocals = fthreadLocals.get(threads[i]);
                        if (threadLocals != null) {
                            final Field ftable = threadLocals.getClass().getDeclaredField("table");
                            ftable.setAccessible(true);
                            final Object table[] = (Object[]) ftable.get(threadLocals);
                            for (int kk = 0; kk < table.length; kk++) {
                                final Object element = table[kk];
                                if (element != null) {
                                    final Field fvalue = element.getClass().getDeclaredField("value");
                                    fvalue.setAccessible(true);
                                    final Object value = fvalue.get(element);
                                    if ((value != null) && (value.getClass().getClassLoader() != null) && (value.getClass().getClassLoader().getClass() == CClassLoader.class)) {
                                        fvalue.set(element, null);
                                    }
                                    if (value instanceof Map) {
                                        clearMap((Map) value);
                                    } else if (value instanceof List) {
                                        clearList((List) value);
                                    } else if (value instanceof Set) {
                                        clearSet((Set) value);
                                    } else if (value instanceof Object[]) {
                                        clearArray((Object[]) value);
                                    }
                                    fvalue.setAccessible(false);
                                }
                            }
                            ftable.setAccessible(false);
                        }
                        fthreadLocals.setAccessible(false);
                    } catch (final Throwable ignore) {
                        ignore.printStackTrace();
                    }
                    // Remove any protection domain referring to this loader tree.
                    try {
                        final Field finheritedAccessControlContext = Thread.class.getDeclaredField("inheritedAccessControlContext");
                        finheritedAccessControlContext.setAccessible(true);
                        final Object inheritedAccessControlContext = finheritedAccessControlContext.get(threads[i]);
                        if (inheritedAccessControlContext != null) {
                            final Field fcontext = AccessControlContext.class.getDeclaredField("context");
                            fcontext.setAccessible(true);
                            final Object context[] = (Object[]) fcontext.get(inheritedAccessControlContext);
                            if (context != null) {
                                for (int k = 0; k < context.length; k++) {
                                    if (context[k] == null)
                                        continue;
                                    final Field fclassloader = ProtectionDomain.class.getDeclaredField("classloader");
                                    fclassloader.setAccessible(true);
                                    final Object classloader = fclassloader.get(context[k]);
                                    if ((classloader != null) && (classloader.getClass() == CClassLoader.class)) {
                                        context[k] = null;
                                    }
                                    fclassloader.setAccessible(false);
                                }
                            }
                            fcontext.setAccessible(false);
                        }
                        finheritedAccessControlContext.setAccessible(false);
                    } catch (final Throwable ignore) {
                        ignore.printStackTrace();
                    }
                }
                fthreads.setAccessible(false);
                // Forcibly stop the threads collected above (Thread.stop via
                // reflection to dodge deprecation at compile time).
                for (int i = 0; i < toStop.size(); i++) {
                    try {
                        final Thread t = (Thread) toStop.get(i);
                        final Method stop = t.getClass().getMethod("stop", (Class[]) null);
                        stop.invoke(t, (Object[]) null);
                    } catch (final Throwable ignore) {
                    }
                }
            } catch (final Throwable ignore) {
            }
        }
        try {
            CThreadContext.destroy();
        } catch (Throwable ignore) {
        }
        // Encourage the JVM to actually release everything we just detached.
        System.runFinalization();
        System.gc();
        Introspector.flushCaches();
        System.out.println("Destroying YAHP ClassLoader Tree : done");
    }
}
public class Predicates { /** * Returns a predicate that evaluates to { @ code true } if any one of its
* components evaluates to { @ code true } . The components are evaluated in
* order , and evaluation will be " short - circuited " as soon as as soon as a
* true predicate is found . It defensively copies the iterable passed in , so
* future changes to it won ' t alter the behavior of this predicate . If { @ code
* components } is empty , the returned predicate will always evaluate to { @ code
* false } .
* @ param components the components
* @ return a predicate */
public static < T > Predicate < T > or ( Collection < ? extends Predicate < ? super T > > components ) { } } | return new OrPredicate < T > ( components ) ; |
public class InternalSARLParser {
    /**
     * Parses an XCollectionLiteral, which is either an XSetLiteral or an
     * XListLiteral (ANTLR-generated; do not hand-edit the decision logic).
     *
     * InternalSARL.g:13743:1: ruleXCollectionLiteral returns [EObject current = null] :
     *   ( this_XSetLiteral_0 = ruleXSetLiteral | this_XListLiteral_1 = ruleXListLiteral ) ;
     *
     * @return the parsed collection-literal model element, or {@code null} on
     *         a backtracking failure
     * @throws RecognitionException never propagated directly; recognition
     *         errors are recovered in the catch block below
     */
    public final EObject ruleXCollectionLiteral() throws RecognitionException {
        EObject current = null;
        EObject this_XSetLiteral_0 = null;
        EObject this_XListLiteral_1 = null;
        enterRule();
        try {
            // InternalSARL.g:13749:2: ( ( this_XSetLiteral_0 = ruleXSetLiteral | this_XListLiteral_1 = ruleXListLiteral ) )
            // InternalSARL.g:13750:2: ( this_XSetLiteral_0 = ruleXSetLiteral | this_XListLiteral_1 = ruleXListLiteral )
            {
                // InternalSARL.g:13750:2: ( this_XSetLiteral_0 = ruleXSetLiteral | this_XListLiteral_1 = ruleXListLiteral )
                // Two-token lookahead decides between the alternatives.
                // Token 106 presumably opens both literal forms; 55 vs 29
                // disambiguates list vs set — TODO confirm against the token table.
                int alt326 = 2;
                int LA326_0 = input.LA(1);
                if ((LA326_0 == 106)) {
                    int LA326_1 = input.LA(2);
                    if ((LA326_1 == 55)) {
                        alt326 = 2;
                    } else if ((LA326_1 == 29)) {
                        alt326 = 1;
                    } else {
                        // No viable alternative on the second token.
                        if (state.backtracking > 0) {
                            state.failed = true;
                            return current;
                        }
                        NoViableAltException nvae = new NoViableAltException("", 326, 1, input);
                        throw nvae;
                    }
                } else {
                    // No viable alternative on the first token.
                    if (state.backtracking > 0) {
                        state.failed = true;
                        return current;
                    }
                    NoViableAltException nvae = new NoViableAltException("", 326, 0, input);
                    throw nvae;
                }
                switch (alt326) {
                    case 1:
                        // InternalSARL.g:13751:3: this_XSetLiteral_0 = ruleXSetLiteral
                        {
                            if (state.backtracking == 0) {
                                newCompositeNode(grammarAccess.getXCollectionLiteralAccess().getXSetLiteralParserRuleCall_0());
                            }
                            pushFollow(FOLLOW_2);
                            this_XSetLiteral_0 = ruleXSetLiteral();
                            state._fsp--;
                            if (state.failed) return current;
                            if (state.backtracking == 0) {
                                current = this_XSetLiteral_0;
                                afterParserOrEnumRuleCall();
                            }
                        }
                        break;
                    case 2:
                        // InternalSARL.g:13760:3: this_XListLiteral_1 = ruleXListLiteral
                        {
                            if (state.backtracking == 0) {
                                newCompositeNode(grammarAccess.getXCollectionLiteralAccess().getXListLiteralParserRuleCall_1());
                            }
                            pushFollow(FOLLOW_2);
                            this_XListLiteral_1 = ruleXListLiteral();
                            state._fsp--;
                            if (state.failed) return current;
                            if (state.backtracking == 0) {
                                current = this_XListLiteral_1;
                                afterParserOrEnumRuleCall();
                            }
                        }
                        break;
                }
            }
            if (state.backtracking == 0) {
                leaveRule();
            }
        } catch (RecognitionException re) {
            // Standard ANTLR error recovery: resync and keep the skipped tokens.
            recover(input, re);
            appendSkippedTokens();
        } finally {
        }
        return current;
    }
}
public class DeleteAliasRequestMarshaller { /** * Marshall the given parameter object . */
public void marshall ( DeleteAliasRequest deleteAliasRequest , ProtocolMarshaller protocolMarshaller ) { } } | if ( deleteAliasRequest == null ) { throw new SdkClientException ( "Invalid argument passed to marshall(...)" ) ; } try { protocolMarshaller . marshall ( deleteAliasRequest . getFunctionName ( ) , FUNCTIONNAME_BINDING ) ; protocolMarshaller . marshall ( deleteAliasRequest . getName ( ) , NAME_BINDING ) ; } catch ( Exception e ) { throw new SdkClientException ( "Unable to marshall request to JSON: " + e . getMessage ( ) , e ) ; } |
public class Initialization { /** * Resolves the described entry point .
* @ param classLoaderResolver The class loader resolved to use .
* @ param groupId This project ' s group id .
* @ param artifactId This project ' s artifact id .
* @ param version This project ' s version id .
* @ param packaging This project ' s packaging
* @ return The resolved entry point .
* @ throws MojoExecutionException If the entry point cannot be created . */
@ SuppressFBWarnings ( value = "REC_CATCH_EXCEPTION" , justification = "Applies Maven exception wrapper" ) public EntryPoint getEntryPoint ( ClassLoaderResolver classLoaderResolver , String groupId , String artifactId , String version , String packaging ) throws MojoExecutionException { } } | if ( entryPoint == null || entryPoint . length ( ) == 0 ) { throw new MojoExecutionException ( "Entry point name is not defined" ) ; } for ( EntryPoint . Default entryPoint : EntryPoint . Default . values ( ) ) { if ( this . entryPoint . equals ( entryPoint . name ( ) ) ) { return entryPoint ; } } try { return ( EntryPoint ) Class . forName ( entryPoint , false , classLoaderResolver . resolve ( asCoordinate ( groupId , artifactId , version , packaging ) ) ) . getDeclaredConstructor ( ) . newInstance ( ) ; } catch ( Exception exception ) { throw new MojoExecutionException ( "Cannot create entry point: " + entryPoint , exception ) ; } |
public class VocabularyHolder {
    /**
     * Builds the Huffman binary tree over the vocabulary, ordered by word
     * counter, and assigns a Huffman code/point path to every word.
     * Based on original w2v by google.
     *
     * <p>Uses the classic two-pointer construction over a combined array of
     * leaf counts (first {@code vocab.size()} slots) and internal-node counts
     * (remaining slots, initialized to {@code Integer.MAX_VALUE}).
     *
     * @return the vocabulary list (assumed sorted by descending count —
     *         TODO confirm {@code this.words()} guarantees that order), with
     *         each word's HuffmanNode populated; {@code idxMap} is rebuilt
     *         as a side effect.
     */
    public List<VocabularyWord> updateHuffmanCodes() {
        int min1i;
        int min2i;
        int b;
        int i;
        // get vocabulary as sorted list
        List<VocabularyWord> vocab = this.words();
        int count[] = new int[vocab.size() * 2 + 1];
        int parent_node[] = new int[vocab.size() * 2 + 1];
        byte binary[] = new byte[vocab.size() * 2 + 1];
        // at this point vocab is sorted, with descending order
        for (int a = 0; a < vocab.size(); a++)
            count[a] = vocab.get(a).getCount();
        // Internal-node slots start at "infinity" so real counts win first.
        for (int a = vocab.size(); a < vocab.size() * 2; a++)
            count[a] = Integer.MAX_VALUE;
        // pos1 walks leaves right-to-left (smallest counts), pos2 walks
        // freshly created internal nodes left-to-right.
        int pos1 = vocab.size() - 1;
        int pos2 = vocab.size();
        for (int a = 0; a < vocab.size(); a++) {
            // First, find two smallest nodes 'min1, min2'
            if (pos1 >= 0) {
                if (count[pos1] < count[pos2]) {
                    min1i = pos1;
                    pos1--;
                } else {
                    min1i = pos2;
                    pos2++;
                }
            } else {
                min1i = pos2;
                pos2++;
            }
            if (pos1 >= 0) {
                if (count[pos1] < count[pos2]) {
                    min2i = pos1;
                    pos1--;
                } else {
                    min2i = pos2;
                    pos2++;
                }
            } else {
                min2i = pos2;
                pos2++;
            }
            // Merge the two minima into internal node vocab.size() + a;
            // min2i is the larger of the pair and gets bit 1.
            count[vocab.size() + a] = count[min1i] + count[min2i];
            parent_node[min1i] = vocab.size() + a;
            parent_node[min2i] = vocab.size() + a;
            binary[min2i] = 1;
        }
        // Now assign binary code to each vocabulary word
        byte[] code = new byte[MAX_CODE_LENGTH];
        int[] point = new int[MAX_CODE_LENGTH];
        for (int a = 0; a < vocab.size(); a++) {
            b = a;
            i = 0;
            byte[] lcode = new byte[MAX_CODE_LENGTH];
            int[] lpoint = new int[MAX_CODE_LENGTH];
            // Walk leaf-to-root, collecting the bit and node index at each step;
            // vocab.size() * 2 - 2 is the root of the completed tree.
            while (true) {
                code[i] = binary[b];
                point[i] = b;
                i++;
                b = parent_node[b];
                if (b == vocab.size() * 2 - 2)
                    break;
            }
            // Reverse into root-to-leaf order; points are re-based so internal
            // node k becomes k - vocab.size().
            lpoint[0] = vocab.size() - 2;
            for (b = 0; b < i; b++) {
                lcode[i - b - 1] = code[b];
                lpoint[i - b] = point[b] - vocab.size();
            }
            vocab.get(a).setHuffmanNode(new HuffmanNode(lcode, lpoint, a, (byte) i));
        }
        // Rebuild the index -> word map from the freshly assigned nodes.
        idxMap.clear();
        for (VocabularyWord word : vocab) {
            idxMap.put(word.getHuffmanNode().getIdx(), word);
        }
        return vocab;
    }
}
public class CodedConstant { /** * write list to { @ link CodedOutputStream } object .
* @ param out target output stream to write
* @ param order field order
* @ param type field type
* @ param list target list object to be serialized
* @ param packed the packed
* @ throws IOException Signals that an I / O exception has occurred . */
public static void writeToList ( CodedOutputStream out , int order , FieldType type , List list , boolean packed ) throws IOException { } } | if ( list == null || list . isEmpty ( ) ) { return ; } if ( packed ) { out . writeUInt32NoTag ( makeTag ( order , WireFormat . WIRETYPE_LENGTH_DELIMITED ) ) ; out . writeUInt32NoTag ( computeListSize ( order , list , type , false , null , packed , true ) ) ; } for ( Object object : list ) { if ( object == null ) { throw new NullPointerException ( "List can not include Null value." ) ; } writeObject ( out , order , type , object , true , ! packed ) ; } |
public class Consumer { /** * Dequeues messages from the local buffer as specified by the limit . If no messages are available to dequeue , then waits for at most timeout
* milliseconds before returning .
* @ param < T > The result type .
* @ param topic The topic to dequeue messages from .
* @ param type The type that each message should be converted to .
* @ param timeout The max amount of time in milliseconds that the function can take to dequeue limit number of messages . If number of dequeued
* messages is less than limit , then only those messages are returned .
* @ param limit The max number of messages to dequeue .
* @ return Messages of the given type belonging to the given topic . Empty list if no such topic exists or the method times out . */
public < T extends Serializable > List < T > dequeueFromBuffer ( String topic , Class < T > type , int timeout , int limit ) { } } | List < T > result = new ArrayList < T > ( ) ; long cutoff = System . currentTimeMillis ( ) + timeout ; BlockingQueue < String > queue = _topics . get ( topic ) . getMessages ( ) ; while ( System . currentTimeMillis ( ) < cutoff && ( limit < 0 || result . size ( ) < limit ) ) { if ( Thread . currentThread ( ) . isInterrupted ( ) ) { break ; } try { String message = queue . poll ( timeout , TimeUnit . MILLISECONDS ) ; if ( message != null && ! message . isEmpty ( ) ) { if ( String . class . isAssignableFrom ( type ) ) { result . add ( type . cast ( message ) ) ; } else { result . add ( _mapper . readValue ( message , type ) ) ; } if ( result . size ( ) % 1000 == 0 ) { _logger . debug ( "Dequeued {} messages from local buffer." , result . size ( ) ) ; } } } catch ( InterruptedException e ) { _logger . warn ( "Interrupted while waiting for poll() to return a message." ) ; Thread . currentThread ( ) . interrupt ( ) ; } catch ( IOException e ) { _logger . warn ( "Exception while deserializing message to type: " + type + ". Skipping this message." , e ) ; } } return result ; |
public class TasksInner { /** * Create or update task .
* The tasks resource is a nested , proxy - only resource representing work performed by a DMS instance . The PUT method creates a new task or updates an existing one , although since tasks have no mutable custom properties , there is little reason to update an existing one .
* @ param groupName Name of the resource group
* @ param serviceName Name of the service
* @ param projectName Name of the project
* @ param taskName Name of the Task
* @ param parameters Information about the task
* @ param serviceCallback the async ServiceCallback to handle successful and failed responses .
* @ throws IllegalArgumentException thrown if parameters fail the validation
* @ return the { @ link ServiceFuture } object */
public ServiceFuture < ProjectTaskInner > createOrUpdateAsync ( String groupName , String serviceName , String projectName , String taskName , ProjectTaskInner parameters , final ServiceCallback < ProjectTaskInner > serviceCallback ) { } } | return ServiceFuture . fromResponse ( createOrUpdateWithServiceResponseAsync ( groupName , serviceName , projectName , taskName , parameters ) , serviceCallback ) ; |
public class GetNextCaCertResponseHandler { /** * { @ inheritDoc } */
@ Override public CertStore getResponse ( final byte [ ] content , final String mimeType ) throws ContentException { } } | if ( mimeType . startsWith ( NEXT_CA_CERT ) ) { // http : / / tools . ietf . org / html / draft - nourse - scep - 20 # section - 4.6.1
// The response consists of a SignedData PKCS # 7 [ RFC2315 ] ,
// signed by the current CA ( or RA ) signing key .
try { CMSSignedData cmsMessageData = new CMSSignedData ( content ) ; ContentInfo cmsContentInfo = ContentInfo . getInstance ( cmsMessageData . getEncoded ( ) ) ; final CMSSignedData sd = new CMSSignedData ( cmsContentInfo ) ; if ( ! SignedDataUtils . isSignedBy ( sd , signer ) ) { throw new InvalidContentException ( "Invalid Signer" ) ; } // The content of the SignedData PKCS # 7 [ RFC2315 ] is a
// degenerate
// certificates - only Signed - data ( Section 3.3 ) message
// containing the
// new CA certificate and any new RA certificates , as defined in
// Section 5.2.1.1.2 , to be used when the current CA certificate
// expires .
return SignedDataUtils . fromSignedData ( sd ) ; } catch ( IOException e ) { throw new InvalidContentTypeException ( e ) ; } catch ( CMSException e ) { throw new InvalidContentTypeException ( e ) ; } } else { throw new InvalidContentTypeException ( mimeType , NEXT_CA_CERT ) ; } |
public class VirtualMachineRunCommandsInner { /** * Lists all available run commands for a subscription in a location .
* @ param location The location upon which run commands is queried .
* @ throws IllegalArgumentException thrown if parameters fail the validation
* @ return the observable to the PagedList & lt ; RunCommandDocumentBaseInner & gt ; object */
public Observable < Page < RunCommandDocumentBaseInner > > listAsync ( final String location ) { } } | return listWithServiceResponseAsync ( location ) . map ( new Func1 < ServiceResponse < Page < RunCommandDocumentBaseInner > > , Page < RunCommandDocumentBaseInner > > ( ) { @ Override public Page < RunCommandDocumentBaseInner > call ( ServiceResponse < Page < RunCommandDocumentBaseInner > > response ) { return response . body ( ) ; } } ) ; |
public class AmazonGameLiftClient { /** * Updates settings for a game session queue , which determines how new game session requests in the queue are
* processed . To update settings , specify the queue name to be updated and provide the new settings . When updating
* destinations , provide a complete list of destinations .
* < ul >
* < li >
* < a > CreateGameSessionQueue < / a >
* < / li >
* < li >
* < a > DescribeGameSessionQueues < / a >
* < / li >
* < li >
* < a > UpdateGameSessionQueue < / a >
* < / li >
* < li >
* < a > DeleteGameSessionQueue < / a >
* < / li >
* < / ul >
* @ param updateGameSessionQueueRequest
* Represents the input for a request action .
* @ return Result of the UpdateGameSessionQueue operation returned by the service .
* @ throws InternalServiceException
* The service encountered an unrecoverable internal failure while processing the request . Clients can retry
* such requests immediately or after a waiting period .
* @ throws InvalidRequestException
* One or more parameter values in the request are invalid . Correct the invalid parameter values before
* retrying .
* @ throws NotFoundException
* A service resource associated with the request could not be found . Clients should not retry such
* requests .
* @ throws UnauthorizedException
* The client failed authentication . Clients should not retry such requests .
* @ sample AmazonGameLift . UpdateGameSessionQueue
* @ see < a href = " http : / / docs . aws . amazon . com / goto / WebAPI / gamelift - 2015-10-01 / UpdateGameSessionQueue "
* target = " _ top " > AWS API Documentation < / a > */
@ Override public UpdateGameSessionQueueResult updateGameSessionQueue ( UpdateGameSessionQueueRequest request ) { } } | request = beforeClientExecution ( request ) ; return executeUpdateGameSessionQueue ( request ) ; |
public class CredentialsConfig {
    /**
     * Unlike getPassword, this resolves the password according to the current
     * credentials mode of the system (legacy / credentials plugin).
     * (Original javadoc incorrectly said "username"; this method returns the
     * password.)
     *
     * @param item the context item for the credentials-plugin lookup —
     *        presumably the owning Jenkins item; TODO confirm with callers
     * @return the password that should apply in this configuration
     */
    public String providePassword(Item item) {
        return isUsingCredentialsPlugin() ? PluginsUtils.credentialsLookup(credentialsId, item).getPassword() : credentials.getPassword();
    }
}
public class Model { /** * Adds an edge to the model .
* If either the source or target vertex of the edge is not in the model ,
* they will be automatically added as well .
* @ param edge The edge to be added .
* @ return The model . */
public Model addEdge ( Edge edge ) { } } | edges . add ( edge ) ; if ( isNotNull ( edge . getSourceVertex ( ) ) && ! vertices . contains ( edge . getSourceVertex ( ) ) ) { vertices . add ( edge . getSourceVertex ( ) ) ; } if ( isNotNull ( edge . getTargetVertex ( ) ) && ! vertices . contains ( edge . getTargetVertex ( ) ) ) { vertices . add ( edge . getTargetVertex ( ) ) ; } return this ; |
public class TransformerRegistry { /** * Resolve the host registry .
* @ param mgmtVersion the mgmt version
* @ param subsystems the subsystems
* @ return the transformer registry */
public OperationTransformerRegistry resolveHost ( final ModelVersion mgmtVersion , final ModelNode subsystems ) { } } | return resolveHost ( mgmtVersion , resolveVersions ( subsystems ) ) ; |
public class DatabaseAccountsInner { /** * Lists the read - only access keys for the specified Azure Cosmos DB database account .
* @ param resourceGroupName Name of an Azure resource group .
* @ param accountName Cosmos DB database account name .
* @ param serviceCallback the async ServiceCallback to handle successful and failed responses .
* @ throws IllegalArgumentException thrown if parameters fail the validation
* @ return the { @ link ServiceFuture } object */
public ServiceFuture < DatabaseAccountListReadOnlyKeysResultInner > listReadOnlyKeysAsync ( String resourceGroupName , String accountName , final ServiceCallback < DatabaseAccountListReadOnlyKeysResultInner > serviceCallback ) { } } | return ServiceFuture . fromResponse ( listReadOnlyKeysWithServiceResponseAsync ( resourceGroupName , accountName ) , serviceCallback ) ; |
public class StreamSet { /** * Get an iterator over all of the non - null streams . The order is not
* guaranteed .
* @ throws SIResourceException */
public Iterator < Stream > iterator ( ) throws SIResourceException { } } | if ( TraceComponent . isAnyTracingEnabled ( ) && tc . isEntryEnabled ( ) ) SibTr . entry ( this , tc , "iterator" ) ; List < Stream > streams = new ArrayList < Stream > ( ) ; for ( int j = 0 ; j < maxReliabilityIndex + 1 ; j ++ ) { ReliabilitySubset subset = getSubset ( getReliability ( j ) ) ; if ( subset != null ) { for ( int i = 0 ; i < SIMPConstants . MSG_HIGH_PRIORITY + 1 ; i ++ ) { Stream stream = subset . getStream ( i ) ; if ( stream != null ) streams . add ( stream ) ; } } } Iterator < Stream > itr = streams . iterator ( ) ; if ( TraceComponent . isAnyTracingEnabled ( ) && tc . isEntryEnabled ( ) ) SibTr . exit ( tc , "iterator" , itr ) ; return itr ; |
public class MeshArbiter {
    /**
     * Collect the failure site update messages from all sites. This site sent
     * its own mailbox the above broadcast; the maximum is local to this site.
     * This also ensures at least one response.
     * Concurrent failures can be detected by additional reports from the
     * FaultDistributor or a mismatch in the set of failed hosts reported in a
     * message from another site.
     *
     * @param hsIds the set of host-site ids participating in this resolution
     *        round — presumably the pre-failure mesh membership; TODO confirm
     * @return {@code true} when enough fault information has been gathered to
     *         decide; {@code false} when a new concurrent failure was detected
     *         and the whole resolution process must restart
     */
    private boolean discoverGlobalFaultData_rcv(Set<Long> hsIds) {
        long blockedOnReceiveStart = System.currentTimeMillis();
        long lastReportTime = 0;
        boolean haveEnough = false;
        // Single-element array so needForward(...) can mutate the counter.
        int[] forwardStallCount = new int[] { FORWARD_STALL_COUNT };
        do {
            // Short 5ms blocking receive so the loop can keep heartbeating.
            VoltMessage m = m_mailbox.recvBlocking(receiveSubjects, 5);
            /*
             * If fault resolution takes longer then 10 seconds start logging
             * (at most once per minute).
             */
            final long now = System.currentTimeMillis();
            if (now - blockedOnReceiveStart > 10000) {
                if (now - lastReportTime > 60000) {
                    lastReportTime = System.currentTimeMillis();
                    haveNecessaryFaultInfo(m_seeker.getSurvivors(), true);
                }
            }
            if (m == null) {
                // Send a heartbeat to keep the dead host timeout active. Needed because IV2 doesn't
                // generate its own heartbeats to keep this running.
                m_meshAide.sendHeartbeats(m_seeker.getSurvivors());
            } else if (m.getSubject() == Subject.SITE_FAILURE_UPDATE.getId()) {
                SiteFailureMessage sfm = (SiteFailureMessage) m;
                // Ignore updates from non-survivors, already-failed sites, or
                // updates that carry no new failed-site information.
                if (!m_seeker.getSurvivors().contains(m.m_sourceHSId)
                        || m_failedSites.contains(m.m_sourceHSId)
                        || m_failedSites.containsAll(sfm.getFailedSites()))
                    continue;
                if (!sfm.m_decision.isEmpty()) {
                    m_decidedSurvivors.put(sfm.m_sourceHSId, sfm);
                }
                updateFailedSitesLedger(hsIds, sfm);
                m_seeker.add(sfm);
                addForwardCandidate(new SiteFailureForwardMessage(sfm));
                m_recoveryLog.info("Agreement, Received " + sfm);
                if (m_recoveryLog.isDebugEnabled()) {
                    // NOTE(review): logs at info level despite the debug-enabled
                    // guard (the forward branch below uses debug) — confirm intent.
                    m_recoveryLog.info(String.format("\n %s\n %s\n %s\n %s\n %s",
                            m_seeker.dumpAlive(), m_seeker.dumpDead(), m_seeker.dumpReported(),
                            m_seeker.dumpSurvivors(), dumpInTrouble()));
                }
            } else if (m.getSubject() == Subject.SITE_FAILURE_FORWARD.getId()) {
                SiteFailureForwardMessage fsfm = (SiteFailureForwardMessage) m;
                addForwardCandidate(fsfm);
                // Ignore forwards from outside the mesh, about known survivors,
                // from failed reporters, or carrying no new information.
                if (!hsIds.contains(fsfm.m_sourceHSId)
                        || m_seeker.getSurvivors().contains(fsfm.m_reportingHSId)
                        || m_failedSites.contains(fsfm.m_reportingHSId)
                        || m_failedSites.containsAll(fsfm.getFailedSites()))
                    continue;
                m_seeker.add(fsfm);
                m_recoveryLog.info("Agreement, Received forward " + fsfm);
                if (m_recoveryLog.isDebugEnabled()) {
                    m_recoveryLog.debug(String.format("\n %s\n %s\n %s\n %s\n %s",
                            m_seeker.dumpAlive(), m_seeker.dumpDead(), m_seeker.dumpReported(),
                            m_seeker.dumpSurvivors(), dumpInTrouble()));
                }
                // A useful forward arrived: reset the stall countdown.
                forwardStallCount[0] = FORWARD_STALL_COUNT;
            } else if (m.getSubject() == Subject.FAILURE.getId()) {
                /*
                 * If the fault distributor reports a new fault, ignore it if it is known, otherwise
                 * re-deliver the message to ourself and then abort so that the process can restart.
                 */
                FaultMessage fm = (FaultMessage) m;
                Discard ignoreIt = mayIgnore(hsIds, fm);
                if (Discard.DoNot == ignoreIt) {
                    m_mailbox.deliverFront(m);
                    m_recoveryLog.info("Agreement, Detected a concurrent failure from FaultDistributor, new failed site "
                            + CoreUtils.hsIdToString(fm.failedSite));
                    return false;
                } else {
                    if (m_recoveryLog.isDebugEnabled()) {
                        ignoreIt.log(fm);
                    }
                }
            }
            // Once haveEnough flips true it stays true for the rest of the loop.
            haveEnough = haveEnough || haveNecessaryFaultInfo(m_seeker.getSurvivors(), false);
            if (haveEnough) {
                // Forward accumulated candidates to any site that has not yet
                // seen the corresponding death, then drop them from the queue.
                Iterator<Map.Entry<Long, SiteFailureForwardMessage>> itr =
                        m_forwardCandidates.entrySet().iterator();
                while (itr.hasNext()) {
                    Map.Entry<Long, SiteFailureForwardMessage> e = itr.next();
                    Set<Long> unseenBy = m_seeker.forWhomSiteIsDead(e.getKey());
                    if (unseenBy.size() > 0) {
                        m_mailbox.send(Longs.toArray(unseenBy), e.getValue());
                        // NOTE(review): "fowarding" typo is preserved as-is in the log text.
                        m_recoveryLog.info("Agreement, fowarding to "
                                + CoreUtils.hsIdCollectionToString(unseenBy) + " " + e.getValue());
                    }
                    itr.remove();
                }
            }
        } while (!haveEnough || m_seeker.needForward(forwardStallCount));
        return true;
    }
}
public class RestApiClient { /** * Delete admin from chatroom .
* @ param roomName
* the room name
* @ param jid
* the jid
* @ return the response */
public Response deleteAdmin ( String roomName , String jid ) { } } | return restClient . delete ( "chatrooms/" + roomName + "/admins/" + jid , new HashMap < String , String > ( ) ) ; |
public class MembershipHandlerImpl {
    /**
     * {@inheritDoc}
     *
     * <p>Verifies the caller holds the MANAGE_LISTENERS permission before
     * registering the listener; the validate call presumably throws a security
     * exception when the permission is missing — TODO confirm against
     * SecurityHelper.
     */
    public void addMembershipEventListener(MembershipEventListener listener) {
        SecurityHelper.validateSecurityPermission(PermissionConstants.MANAGE_LISTENERS);
        listeners.add(listener);
    }
}
public class DescribeServicesResult { /** * The service metadata for the service or services in the response .
* < b > NOTE : < / b > This method appends the values to the existing list ( if any ) . Use
* { @ link # setServices ( java . util . Collection ) } or { @ link # withServices ( java . util . Collection ) } if you want to override
* the existing values .
* @ param services
* The service metadata for the service or services in the response .
* @ return Returns a reference to this object so that method calls can be chained together . */
public DescribeServicesResult withServices ( Service ... services ) { } } | if ( this . services == null ) { setServices ( new java . util . ArrayList < Service > ( services . length ) ) ; } for ( Service ele : services ) { this . services . add ( ele ) ; } return this ; |
public class ShardingEncryptorStrategy { /** * Get sharding encryptor .
* @ param logicTableName logic table name
* @ param columnName column name
* @ return optional of sharding encryptor */
public Optional < ShardingEncryptor > getShardingEncryptor ( final String logicTableName , final String columnName ) { } } | return Collections2 . filter ( columns , new Predicate < ColumnNode > ( ) { @ Override public boolean apply ( final ColumnNode input ) { return input . equals ( new ColumnNode ( logicTableName , columnName ) ) ; } } ) . isEmpty ( ) ? Optional . < ShardingEncryptor > absent ( ) : Optional . of ( shardingEncryptor ) ; |
public class ThreadSet { /** * Run query using this as an initial thread set . */
public < T extends SingleThreadSetQuery . Result < SetType , RuntimeType , ThreadType > > T query ( SingleThreadSetQuery < T > query ) { } } | return query . < SetType , RuntimeType , ThreadType > query ( ( SetType ) this ) ; |
public class SparseMatrix { /** * Parses { @ link SparseMatrix } from the given CSV string .
* @ param csv the CSV string representing a matrix
* @ return a parsed matrix */
public static SparseMatrix fromCSV ( String csv ) { } } | return Matrix . fromCSV ( csv ) . to ( Matrices . SPARSE ) ; |
public class Record { /** * Get value { @ link Float } value
* @ param label target label
* @ return { @ link Float } value of the label . If it is not null . */
public Float getValueFloat ( String label ) { } } | PrimitiveObject o = getPrimitiveObject ( VALUE , label , ObjectUtil . FLOAT , "Float" ) ; if ( o == null ) { return null ; } return ( Float ) o . getObject ( ) ; |
public class AbstractLoader { /** * Replaces the host wildcard from an incoming config with a proper hostname .
* @ param input the input config .
* @ param hostname the hostname to replace it with .
* @ return a replaced configuration . */
protected String replaceHostWildcard ( String input , NetworkAddress hostname ) { } } | return input . replace ( "$HOST" , hostname . address ( ) ) ; |
public class MultiPartParser { private void setState ( FieldState state ) { } } | if ( DEBUG ) LOG . debug ( "{}:{} --> {}" , _state , _fieldState , state ) ; _fieldState = state ; |
public class ParallelTaskBuilder { /** * Sets the ssh password .
* @ param password
* the password
* @ return the parallel task builder */
public ParallelTaskBuilder setSshPassword ( String password ) { } } | this . sshMeta . setPassword ( password ) ; this . sshMeta . setSshLoginType ( SshLoginType . PASSWORD ) ; return this ; |
public class Jinx { /** * Build a multipart body request . */
private byte [ ] buildMultipartBody ( Map < String , String > params , byte [ ] photoData , String boundary ) throws JinxException { } } | ByteArrayOutputStream buffer = new ByteArrayOutputStream ( ) ; try { String filename = params . get ( "filename" ) ; if ( JinxUtils . isNullOrEmpty ( filename ) ) { filename = "image.jpg" ; } String fileMimeType = params . get ( "filemimetype" ) ; if ( JinxUtils . isNullOrEmpty ( fileMimeType ) ) { fileMimeType = "image/jpeg" ; } buffer . write ( ( "--" + boundary + "\r\n" ) . getBytes ( JinxConstants . UTF8 ) ) ; for ( Map . Entry < String , String > entry : params . entrySet ( ) ) { String key = entry . getKey ( ) ; if ( ! key . equals ( "filename" ) && ! key . equals ( "filemimetype" ) ) { buffer . write ( ( "Content-Disposition: form-data; name=\"" + key + "\"\r\n\r\n" ) . getBytes ( JinxConstants . UTF8 ) ) ; buffer . write ( ( entry . getValue ( ) ) . getBytes ( JinxConstants . UTF8 ) ) ; buffer . write ( ( "\r\n" + "--" + boundary + "\r\n" ) . getBytes ( JinxConstants . UTF8 ) ) ; } } buffer . write ( ( "Content-Disposition: form-data; name=\"photo\"; filename=\"" + filename + "\";\r\n" ) . getBytes ( JinxConstants . UTF8 ) ) ; buffer . write ( ( "Content-Type: " + fileMimeType + "\r\n\r\n" ) . getBytes ( JinxConstants . UTF8 ) ) ; buffer . write ( photoData ) ; buffer . write ( ( "\r\n" + "--" + boundary + "--\r\n" ) . getBytes ( JinxConstants . UTF8 ) ) ; // NOTE : last boundary has - - suffix
} catch ( Exception e ) { throw new JinxException ( "Unable to build multipart body." , e ) ; } if ( this . isVerboseLogging ( ) && this . isMultipartLogging ( ) ) { JinxLogger . getLogger ( ) . log ( "Multipart body: " + buffer . toString ( ) ) ; } return buffer . toByteArray ( ) ; |
public class GeometryService { /** * Calculate whether or not two given geometries intersect each other .
* @ param one The first geometry to check for intersection with the second .
* @ param two The second geometry to check for intersection with the first .
* @ return Returns true or false . */
public static boolean intersects ( Geometry one , Geometry two ) { } } | if ( one == null || two == null || isEmpty ( one ) || isEmpty ( two ) ) { return false ; } if ( Geometry . POINT . equals ( one . getGeometryType ( ) ) ) { return intersectsPoint ( one , two ) ; } else if ( Geometry . LINE_STRING . equals ( one . getGeometryType ( ) ) ) { return intersectsLineString ( one , two ) ; } else if ( Geometry . MULTI_POINT . equals ( one . getGeometryType ( ) ) || Geometry . MULTI_LINE_STRING . equals ( one . getGeometryType ( ) ) ) { return intersectsMultiSomething ( one , two ) ; } List < Coordinate > coords1 = new ArrayList < Coordinate > ( ) ; List < Coordinate > coords2 = new ArrayList < Coordinate > ( ) ; getAllCoordinates ( one , coords1 ) ; getAllCoordinates ( two , coords2 ) ; for ( int i = 0 ; i < coords1 . size ( ) - 1 ; i ++ ) { for ( int j = 0 ; j < coords2 . size ( ) - 1 ; j ++ ) { if ( MathService . intersectsLineSegment ( coords1 . get ( i ) , coords1 . get ( i + 1 ) , coords2 . get ( j ) , coords2 . get ( j + 1 ) ) ) { return true ; } } } return false ; |
public class Perl5Util {
    /**
     * Finds occurrences of a pattern in a string (like indexOf), but instead
     * of the first position it returns a struct with full match information.
     *
     * @param strPattern the Perl5 regular expression
     * @param strInput the input string to search
     * @param offset 1-based start offset; values below 1 are clamped to 1
     * @param caseSensitive whether matching is case sensitive
     * @param matchAll when true, return an Array of match structs (or a
     *        single-element Array containing the zeroed default struct when
     *        nothing matched); when false, return the first match struct, or
     *        the zeroed default struct when nothing matched
     * @return a Struct (single match / default) or an Array of Structs
     * @throws MalformedPatternException if the pattern does not compile
     */
    public static Object find(String strPattern, String strInput, int offset, boolean caseSensitive, boolean matchAll) throws MalformedPatternException {
        Perl5Matcher matcher = new Perl5Matcher();
        PatternMatcherInput input = new PatternMatcherInput(strInput);
        Array matches = new ArrayImpl();
        // SINGLELINE is always on; case sensitivity toggles the mask.
        int compileOptions = caseSensitive ? 0 : Perl5Compiler.CASE_INSENSITIVE_MASK;
        compileOptions += Perl5Compiler.SINGLELINE_MASK;
        if (offset < 1) offset = 1;
        Pattern pattern = getPattern(strPattern, compileOptions);
        if (offset <= strInput.length()) {
            input.setCurrentOffset(offset - 1);
            // contains() advances the input past each match, so this loop
            // walks all non-overlapping occurrences.
            while (matcher.contains(input, pattern)) {
                Struct matchStruct = getMatchStruct(matcher.getMatch());
                if (!matchAll) {
                    return matchStruct;
                }
                matches.appendEL(matchStruct);
            }
            if (matches.size() != 0) {
                return matches;
            }
        }
        // No match found: build the zeroed default result struct
        // (pos=[0], len=[0], match=[""]).
        Array posArray = new ArrayImpl();
        Array lenArray = new ArrayImpl();
        Array matchArray = new ArrayImpl();
        posArray.appendEL(Constants.INTEGER_0);
        lenArray.appendEL(Constants.INTEGER_0);
        matchArray.appendEL("");
        Struct struct = new StructImpl();
        struct.setEL("pos", posArray);
        struct.setEL("len", lenArray);
        struct.setEL("match", matchArray);
        if (matchAll) {
            matches.appendEL(struct);
            return matches;
        }
        return struct;
    }
}
public class Linqy { /** * Create a new iterable by applying a mapper function to each
* element of a given sequence . */
public static < F , T > Iterable < T > map ( final Iterable < F > from , final Mapper < ? super F , T > mapper ) { } } | return new Iterable < T > ( ) { @ Override public Iterator < T > iterator ( ) { return new MappingIterator < F , T > ( from . iterator ( ) , mapper ) ; } } ; |
public class Matrix4f { /**
 * Store the values of the given matrix <code>m</code> into <code>this</code> matrix.
 * Note that because the given matrix <code>m</code> stores its values in double precision
 * while <code>this</code> matrix stores them in single precision, precision may be lost.
 * @ see # Matrix4f ( Matrix4dc )
 * @ see # get ( Matrix4d )
 * @ param m
 * the matrix to copy the values from
 * @ return this */
public Matrix4f set ( Matrix4dc m ) { } } | /* Copy all 16 components column by column, narrowing double -> float. */ this . _m00 ( ( float ) m . m00 ( ) ) ; this . _m01 ( ( float ) m . m01 ( ) ) ; this . _m02 ( ( float ) m . m02 ( ) ) ; this . _m03 ( ( float ) m . m03 ( ) ) ; this . _m10 ( ( float ) m . m10 ( ) ) ; this . _m11 ( ( float ) m . m11 ( ) ) ; this . _m12 ( ( float ) m . m12 ( ) ) ; this . _m13 ( ( float ) m . m13 ( ) ) ; this . _m20 ( ( float ) m . m20 ( ) ) ; this . _m21 ( ( float ) m . m21 ( ) ) ; this . _m22 ( ( float ) m . m22 ( ) ) ; this . _m23 ( ( float ) m . m23 ( ) ) ; this . _m30 ( ( float ) m . m30 ( ) ) ; this . _m31 ( ( float ) m . m31 ( ) ) ; this . _m32 ( ( float ) m . m32 ( ) ) ; this . _m33 ( ( float ) m . m33 ( ) ) ; /* also copy the source matrix's cached property flags */ this . _properties ( m . properties ( ) ) ; return this ;
public class TaskDao { /**
 * Puts the task back into the idle state: sets its state to Task.IDLE and
 * clears the reference to its active log.
 * TaskManager use only.
 * @ param id the id of the task to idle */
public void goIdle ( String id ) { } } | setTaskState ( id , Task . IDLE ) ; /* mark the task as idle */ setActiveLogId ( id , null ) ; /* an idle task has no active log */
public class CurationManager { /** * Look through part of the form data for a relationship .
* @ param oid
* The object ID of the current object
* @ param field
* The full field String to store for comparisons
* @ param config
* The config relating to the relationship we are looking for
* @ param baseNode
* The JSON node the relationship should be under
* @ return JsonObject A relationship in JSON , or null if not found */
private JsonObject lookForRelation ( String oid , String field , JsonSimple config , JsonSimple baseNode ) { } } | JsonObject newRelation = new JsonObject ( ) ; newRelation . put ( "field" , field ) ; newRelation . put ( "authority" , true ) ; // * * - 1 - * * EXCLUSIONS
List < String > exPath = config . getStringList ( "excludeCondition" , "path" ) ; String exValue = config . getString ( null , "excludeCondition" , "value" ) ; if ( exPath != null && ! exPath . isEmpty ( ) && exValue != null ) { String value = baseNode . getString ( null , exPath . toArray ( ) ) ; if ( value != null && value . equals ( exValue ) ) { log . info ( "Excluding relationship '{}' based on config" , field ) ; return null ; } } String exStartsWith = config . getString ( null , "excludeCondition" , "startsWith" ) ; String exDoesntStartWith = config . getString ( null , "excludeCondition" , "doesntStartWith" ) ; if ( exPath != null && ! exPath . isEmpty ( ) && exStartsWith != null ) { String value = baseNode . getString ( null , exPath . toArray ( ) ) ; if ( value != null && value . startsWith ( exStartsWith ) ) { log . info ( "Excluding relationship '{}' based on config" , field ) ; return null ; } } else { if ( exPath != null ) { String value = baseNode . getString ( null , exPath . toArray ( ) ) ; if ( value != null ) { if ( exDoesntStartWith instanceof String && ! value . startsWith ( exDoesntStartWith ) ) { log . info ( "Excluding relationship '{}' based on config" , field ) ; return null ; } } } } // * * - 2 - * * IDENTIFIER
// Inside that object where can we find the identifier
List < String > idPath = config . getStringList ( "identifier" ) ; if ( idPath == null || idPath . isEmpty ( ) ) { log . error ( "Ignoring invalid relationship '{}'. No 'identifier'" + " provided in configuration" , field ) ; return null ; } String id = baseNode . getString ( null , idPath . toArray ( ) ) ; if ( id != null && ! id . equals ( "" ) ) { newRelation . put ( "identifier" , id . trim ( ) ) ; } else { log . info ( "Relationship '{}' has no identifier, ignoring!" , field ) ; return null ; } // * * - 3 - * * RELATIONSHIP TYPE
// Relationship type , it may be static and provided for us . . .
String staticRelation = config . getString ( null , "relationship" ) ; List < String > relPath = null ; if ( staticRelation == null ) { // . . . or it could be found in the form data
relPath = config . getStringList ( "relationship" ) ; } // But we have to have one .
if ( staticRelation == null && ( relPath == null || relPath . isEmpty ( ) ) ) { log . error ( "Ignoring invalid relationship '{}'. No relationship" + " String of path in configuration" , field ) ; return null ; } String relString = null ; if ( staticRelation != null ) { relString = staticRelation ; } else { relString = baseNode . getString ( "hasAssociationWith" , relPath . toArray ( ) ) ; } if ( relString == null || relString . equals ( "" ) ) { log . info ( "Relationship '{}' has no type, ignoring!" , field ) ; return null ; } newRelation . put ( "relationship" , relString ) ; // * * - 4 - * * REVERSE RELATIONS
String revRelation = systemConfig . getString ( "hasAssociationWith" , "curation" , "reverseMappings" , relString ) ; newRelation . put ( "reverseRelationship" , revRelation ) ; // * * - 5 - * * DESCRIPTION
String description = config . getString ( null , "description" ) ; if ( description != null ) { newRelation . put ( "description" , description ) ; } // * * - 6 - * * SYSTEM / BROKER
String system = config . getString ( "mint" , "system" ) ; if ( system != null && system . equals ( "mint" ) ) { newRelation . put ( "broker" , mintBroker ) ; } else { newRelation . put ( "broker" , brokerUrl ) ; // ReDBox record ' s should also be told that the ID is an OID
// JCU : causes an exception in CurationManager .
// checkChildren ( ) will convert the identifier to an oid when a
// ' curation - confirm ' is processed
// newRelation . put ( " oid " , id ) ;
} // * * - 7 - * * OPTIONAL
boolean optional = config . getBoolean ( false , "optional" ) ; if ( optional ) { newRelation . put ( "optional" , optional ) ; } return newRelation ; |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.