signature
stringlengths 43
39.1k
| implementation
stringlengths 0
450k
|
|---|---|
public class JoinExamples { /** * Join two collections , using country code as the key . */
static PCollection < String > joinEvents ( PCollection < TableRow > eventsTable , PCollection < TableRow > countryCodes ) throws Exception { } }
|
final TupleTag < String > eventInfoTag = new TupleTag < > ( ) ; final TupleTag < String > countryInfoTag = new TupleTag < > ( ) ; // transform both input collections to tuple collections , where the keys are country
// codes in both cases .
PCollection < KV < String , String > > eventInfo = eventsTable . apply ( ParDo . of ( new ExtractEventDataFn ( ) ) ) ; PCollection < KV < String , String > > countryInfo = countryCodes . apply ( ParDo . of ( new ExtractCountryInfoFn ( ) ) ) ; // country code ' key ' - > CGBKR ( < event info > , < country name > )
PCollection < KV < String , CoGbkResult > > kvpCollection = KeyedPCollectionTuple . of ( eventInfoTag , eventInfo ) . and ( countryInfoTag , countryInfo ) . apply ( CoGroupByKey . create ( ) ) ; // Process the CoGbkResult elements generated by the CoGroupByKey transform .
// country code ' key ' - > string of < event info > , < country name >
PCollection < KV < String , String > > finalResultCollection = kvpCollection . apply ( "Process" , ParDo . of ( new DoFn < KV < String , CoGbkResult > , KV < String , String > > ( ) { @ ProcessElement public void processElement ( ProcessContext c ) { KV < String , CoGbkResult > e = c . element ( ) ; String countryCode = e . getKey ( ) ; String countryName = "none" ; countryName = e . getValue ( ) . getOnly ( countryInfoTag ) ; for ( String eventInfo : c . element ( ) . getValue ( ) . getAll ( eventInfoTag ) ) { // Generate a string that combines information from both collection values
c . output ( KV . of ( countryCode , "Country name: " + countryName + ", Event info: " + eventInfo ) ) ; } } } ) ) ; // write to GCS
PCollection < String > formattedResults = finalResultCollection . apply ( "Format" , ParDo . of ( new DoFn < KV < String , String > , String > ( ) { @ ProcessElement public void processElement ( ProcessContext c ) { String outputstring = "Country code: " + c . element ( ) . getKey ( ) + ", " + c . element ( ) . getValue ( ) ; c . output ( outputstring ) ; } } ) ) ; return formattedResults ;
|
public class ProtoLexer { /** * $ ANTLR start " FIXED64" */
public final void mFIXED64 ( ) throws RecognitionException { } }
|
try { int _type = FIXED64 ; int _channel = DEFAULT_TOKEN_CHANNEL ; // com / dyuproject / protostuff / parser / ProtoLexer . g : 183:5 : ( ' fixed64 ' )
// com / dyuproject / protostuff / parser / ProtoLexer . g : 183:9 : ' fixed64'
{ match ( "fixed64" ) ; } state . type = _type ; state . channel = _channel ; } finally { }
|
public class CheckpointStatsTracker { /** * Register the exposed metrics .
* @ param metricGroup Metric group to use for the metrics . */
private void registerMetrics ( MetricGroup metricGroup ) { } }
|
metricGroup . gauge ( NUMBER_OF_CHECKPOINTS_METRIC , new CheckpointsCounter ( ) ) ; metricGroup . gauge ( NUMBER_OF_IN_PROGRESS_CHECKPOINTS_METRIC , new InProgressCheckpointsCounter ( ) ) ; metricGroup . gauge ( NUMBER_OF_COMPLETED_CHECKPOINTS_METRIC , new CompletedCheckpointsCounter ( ) ) ; metricGroup . gauge ( NUMBER_OF_FAILED_CHECKPOINTS_METRIC , new FailedCheckpointsCounter ( ) ) ; metricGroup . gauge ( LATEST_RESTORED_CHECKPOINT_TIMESTAMP_METRIC , new LatestRestoredCheckpointTimestampGauge ( ) ) ; metricGroup . gauge ( LATEST_COMPLETED_CHECKPOINT_SIZE_METRIC , new LatestCompletedCheckpointSizeGauge ( ) ) ; metricGroup . gauge ( LATEST_COMPLETED_CHECKPOINT_DURATION_METRIC , new LatestCompletedCheckpointDurationGauge ( ) ) ; metricGroup . gauge ( LATEST_COMPLETED_CHECKPOINT_ALIGNMENT_BUFFERED_METRIC , new LatestCompletedCheckpointAlignmentBufferedGauge ( ) ) ; metricGroup . gauge ( LATEST_COMPLETED_CHECKPOINT_EXTERNAL_PATH_METRIC , new LatestCompletedCheckpointExternalPathGauge ( ) ) ;
|
public class ViewFetcher { /** * Compares if the specified views are identical . This is used instead of View . compare
* as it always returns false in cases where the View tree is refreshed .
* @ param firstView the first view
* @ param secondView the second view
* @ return true if views are equal */
private boolean areViewsIdentical ( View firstView , View secondView ) { } }
|
if ( firstView . getId ( ) != secondView . getId ( ) || ! firstView . getClass ( ) . isAssignableFrom ( secondView . getClass ( ) ) ) { return false ; } if ( firstView . getParent ( ) != null && firstView . getParent ( ) instanceof View && secondView . getParent ( ) != null && secondView . getParent ( ) instanceof View ) { return areViewsIdentical ( ( View ) firstView . getParent ( ) , ( View ) secondView . getParent ( ) ) ; } else { return true ; }
|
public class NioClientManager { /** * Handle a SelectionKey which was selected */
private void handleKey ( SelectionKey key ) throws IOException { } }
|
// We could have a ! isValid ( ) key here if the connection is already closed at this point
if ( key . isValid ( ) && key . isConnectable ( ) ) { // ie a client connection which has finished the initial connect process
// Create a ConnectionHandler and hook everything together
PendingConnect data = ( PendingConnect ) key . attachment ( ) ; StreamConnection connection = data . connection ; SocketChannel sc = ( SocketChannel ) key . channel ( ) ; ConnectionHandler handler = new ConnectionHandler ( connection , key , connectedHandlers ) ; try { if ( sc . finishConnect ( ) ) { log . info ( "Connected to {}" , sc . socket ( ) . getRemoteSocketAddress ( ) ) ; key . interestOps ( ( key . interestOps ( ) | SelectionKey . OP_READ ) & ~ SelectionKey . OP_CONNECT ) . attach ( handler ) ; connection . connectionOpened ( ) ; data . future . set ( data . address ) ; } else { log . warn ( "Failed to connect to {}" , sc . socket ( ) . getRemoteSocketAddress ( ) ) ; handler . closeConnection ( ) ; // Failed to connect for some reason
data . future . setException ( new ConnectException ( "Unknown reason" ) ) ; data . future = null ; } } catch ( Exception e ) { // If e is a CancelledKeyException , there is a race to get to interestOps after finishConnect ( ) which
// may cause this . Otherwise it may be any arbitrary kind of connection failure .
// Calling sc . socket ( ) . getRemoteSocketAddress ( ) here throws an exception , so we can only log the error itself
Throwable cause = Throwables . getRootCause ( e ) ; log . warn ( "Failed to connect with exception: {}: {}" , cause . getClass ( ) . getName ( ) , cause . getMessage ( ) , e ) ; handler . closeConnection ( ) ; data . future . setException ( cause ) ; data . future = null ; } } else // Process bytes read
ConnectionHandler . handleKey ( key ) ;
|
public class ArgumentProcessor { /** * Processes command line arguments .
* @ param args The command line arguments to process .
* @ param state The application state object . */
public void process ( String [ ] args , T state ) { } }
|
process ( new ArrayQueue < String > ( args ) , state ) ;
|
public class TimeZoneUtil { /** * translate timezone string format to a timezone
* @ param strTimezoneTrimmed
* @ return */
public static TimeZone toTimeZone ( String strTimezone , TimeZone defaultValue ) { } }
|
if ( strTimezone == null ) return defaultValue ; String strTimezoneTrimmed = StringUtil . replace ( strTimezone . trim ( ) . toLowerCase ( ) , " " , "" , false ) ; TimeZone tz = getTimeZoneFromIDS ( strTimezoneTrimmed ) ; if ( tz != null ) return tz ; // parse GMT followd by a number
float gmtOffset = Float . NaN ; if ( strTimezoneTrimmed . startsWith ( "gmt" ) ) gmtOffset = getGMTOffset ( strTimezoneTrimmed . substring ( 3 ) . trim ( ) , Float . NaN ) ; else if ( strTimezoneTrimmed . startsWith ( "etc/gmt" ) ) gmtOffset = getGMTOffset ( strTimezoneTrimmed . substring ( 7 ) . trim ( ) , Float . NaN ) ; else if ( strTimezoneTrimmed . startsWith ( "utc" ) ) gmtOffset = getGMTOffset ( strTimezoneTrimmed . substring ( 3 ) . trim ( ) , Float . NaN ) ; else if ( strTimezoneTrimmed . startsWith ( "etc/utc" ) ) gmtOffset = getGMTOffset ( strTimezoneTrimmed . substring ( 7 ) . trim ( ) , Float . NaN ) ; if ( ! Float . isNaN ( gmtOffset ) ) { strTimezoneTrimmed = "etc/gmt" + ( gmtOffset >= 0 ? "+" : "" ) + Caster . toString ( gmtOffset ) ; tz = getTimeZoneFromIDS ( strTimezoneTrimmed ) ; if ( tz != null ) return tz ; } // display name in all variations
if ( ! StringUtil . isEmpty ( strTimezoneTrimmed ) ) { tz = dn . get ( strTimezoneTrimmed ) ; if ( tz != null ) return tz ; Iterator < Object > it = IDS . values ( ) . iterator ( ) ; Object o ; while ( it . hasNext ( ) ) { o = it . next ( ) ; if ( o instanceof TimeZone ) { tz = ( TimeZone ) o ; if ( strTimezone . equalsIgnoreCase ( tz . getDisplayName ( true , TimeZone . SHORT , Locale . US ) ) || strTimezone . equalsIgnoreCase ( tz . getDisplayName ( false , TimeZone . SHORT , Locale . US ) ) || strTimezone . equalsIgnoreCase ( tz . getDisplayName ( true , TimeZone . LONG , Locale . US ) ) || strTimezone . equalsIgnoreCase ( tz . getDisplayName ( false , TimeZone . LONG , Locale . US ) ) ) { dn . put ( strTimezoneTrimmed , tz ) ; return tz ; } } } } return defaultValue ;
|
public class TurfMeta { /** * Private helper method to be used with other methods in this class .
* @ param pointList the { @ code List } of { @ link Point } s .
* @ param feature the { @ link Feature } that you ' d like
* to extract the Points from .
* @ param excludeWrapCoord whether or not to include the final
* coordinate of LinearRings that wraps the ring
* in its iteration . Used if a { @ link Feature } in the
* { @ link FeatureCollection } that ' s passed through
* this method , is a { @ link Polygon } or { @ link MultiPolygon }
* geometry .
* @ return a { @ code List } made up of { @ link Point } s .
* @ since 4.8.0 */
@ NonNull private static List < Point > addCoordAll ( @ NonNull List < Point > pointList , @ NonNull Feature feature , @ NonNull boolean excludeWrapCoord ) { } }
|
return coordAllFromSingleGeometry ( pointList , feature . geometry ( ) , excludeWrapCoord ) ;
|
public class DefaultSchemaManager { /** * Only one schema for a table should contain an OCCVersion field mapping .
* This method will compare two schemas and return true if only one has an
* OCC _ VERSION field .
* @ param entitySchema1
* The first schema to compare
* @ param entitySchema2
* The second schema to compare
* @ return True if compatible , otherwise False . */
private boolean validateCompatibleWithTableOccVersion ( EntitySchema entitySchema1 , EntitySchema entitySchema2 ) { } }
|
boolean foundOccMapping = false ; for ( FieldMapping fieldMapping : entitySchema1 . getColumnMappingDescriptor ( ) . getFieldMappings ( ) ) { if ( fieldMapping . getMappingType ( ) == MappingType . OCC_VERSION ) { foundOccMapping = true ; break ; } } if ( foundOccMapping ) { for ( FieldMapping fieldMapping : entitySchema2 . getColumnMappingDescriptor ( ) . getFieldMappings ( ) ) { if ( fieldMapping . getMappingType ( ) == MappingType . OCC_VERSION ) { LOG . warn ( "Field: " + fieldMapping . getFieldName ( ) + " in schema " + entitySchema2 . getName ( ) + " conflicts with an occVersion field in " + entitySchema1 . getName ( ) ) ; return false ; } } } return true ;
|
public class VpcPeeringConnectionVpcInfo { /** * The IPv6 CIDR block for the VPC .
* @ param ipv6CidrBlockSet
* The IPv6 CIDR block for the VPC . */
public void setIpv6CidrBlockSet ( java . util . Collection < Ipv6CidrBlock > ipv6CidrBlockSet ) { } }
|
if ( ipv6CidrBlockSet == null ) { this . ipv6CidrBlockSet = null ; return ; } this . ipv6CidrBlockSet = new com . amazonaws . internal . SdkInternalList < Ipv6CidrBlock > ( ipv6CidrBlockSet ) ;
|
public class BackupManager { /** * Return a stream for the specified backup
* @ param metaData the backup to get
* @ return the stream or null if the stream doesn ' t exist
* @ throws Exception errors */
public BackupStream getBackupStream ( BackupMetaData metaData ) throws Exception { } }
|
return backupProvider . get ( ) . getBackupStream ( exhibitor , metaData , getBackupConfig ( ) ) ;
|
public class UnionQueryAnalyzer { /** * Returns a list of all primary and alternate keys , stripped of ordering . */
private List < Set < ChainedProperty < S > > > getKeys ( ) throws SupportException , RepositoryException { } }
|
StorableInfo < S > info = StorableIntrospector . examine ( mIndexAnalyzer . getStorableType ( ) ) ; List < Set < ChainedProperty < S > > > keys = new ArrayList < Set < ChainedProperty < S > > > ( ) ; keys . add ( stripOrdering ( info . getPrimaryKey ( ) . getProperties ( ) ) ) ; for ( StorableKey < S > altKey : info . getAlternateKeys ( ) ) { keys . add ( stripOrdering ( altKey . getProperties ( ) ) ) ; } // Also fold in all unique indexes , just in case they weren ' t reported
// as actual keys .
Collection < StorableIndex < S > > indexes = mRepoAccess . storageAccessFor ( getStorableType ( ) ) . getAllIndexes ( ) ; for ( StorableIndex < S > index : indexes ) { if ( ! index . isUnique ( ) ) { continue ; } int propCount = index . getPropertyCount ( ) ; Set < ChainedProperty < S > > props = new LinkedHashSet < ChainedProperty < S > > ( propCount ) ; for ( int i = 0 ; i < propCount ; i ++ ) { props . add ( index . getOrderedProperty ( i ) . getChainedProperty ( ) ) ; } keys . add ( props ) ; } return keys ;
|
public class AbstractMemberWriter { /** * Get the inherited summary header for the given class .
* @ param classDoc the class the inherited member belongs to
* @ return a content tree for the inherited summary header */
public Content getInheritedSummaryHeader ( ClassDoc classDoc ) { } }
|
Content inheritedTree = writer . getMemberTreeHeader ( ) ; writer . addInheritedSummaryHeader ( this , classDoc , inheritedTree ) ; return inheritedTree ;
|
public class CLI { /** * Create the available parameters for NER tagging . */
private void loadServerParameters ( ) { } }
|
serverParser . addArgument ( "-p" , "--port" ) . required ( true ) . help ( "Port to be assigned to the server.\n" ) ; serverParser . addArgument ( "-m" , "--model" ) . required ( true ) . help ( "Pass the model to do the tagging as a parameter.\n" ) ; serverParser . addArgument ( "--clearFeatures" ) . required ( false ) . choices ( "yes" , "no" , "docstart" ) . setDefault ( Flags . DEFAULT_FEATURE_FLAG ) . help ( "Reset the adaptive features every sentence; defaults to 'no'; if -DOCSTART- marks" + " are present, choose 'docstart'.\n" ) ; serverParser . addArgument ( "-l" , "--language" ) . required ( true ) . choices ( "ca" , "de" , "en" , "es" , "eu" , "fr" , "gl" , "it" , "nl" , "pt" , "ru" ) . help ( "Choose language.\n" ) ; serverParser . addArgument ( "-o" , "--outputFormat" ) . required ( false ) . choices ( "conll03" , "conll02" , "naf" ) . setDefault ( Flags . DEFAULT_OUTPUT_FORMAT ) . help ( "Choose output format; it defaults to NAF.\n" ) ; serverParser . addArgument ( "--lexer" ) . choices ( "numeric" ) . setDefault ( Flags . DEFAULT_LEXER ) . required ( false ) . help ( "Use lexer rules for NERC tagging; it defaults to false.\n" ) ; serverParser . addArgument ( "--dictTag" ) . required ( false ) . choices ( "tag" , "post" ) . setDefault ( Flags . DEFAULT_DICT_OPTION ) . help ( "Choose to directly tag entities by dictionary look-up; if the 'tag' option is chosen, " + "only tags entities found in the dictionary; if 'post' option is chosen, it will " + "post-process the results of the statistical model.\n" ) ; serverParser . addArgument ( "--dictPath" ) . required ( false ) . setDefault ( Flags . DEFAULT_DICT_PATH ) . help ( "Provide the path to the dictionaries for direct dictionary tagging; it ONLY WORKS if --dictTag " + "option is activated.\n" ) ;
|
public class AbstractSerializer { /** * * * * InputStream * * * */
@ SuppressWarnings ( "resource" ) protected ISynchronizationPoint < ? extends Exception > serializeInputStreamValue ( SerializationContext context , InputStream in , String path , List < SerializationRule > rules ) { } }
|
return serializeIOReadableValue ( context , new IOFromInputStream ( in , in . toString ( ) , Threading . getUnmanagedTaskManager ( ) , priority ) , path , rules ) ;
|
public class MicroServiceTemplateSupport { /** * 页面获取指定节点数据 ( data ) */
public String getNodeData ( String nodePath , String formId , String tableName , String dataColName , String idColName ) { } }
|
MicroMetaDao microDao = getInnerDao ( ) ; String select = dataColName + "->>'$." + dianNode ( nodePath ) + "' as dyna_data" ; String sql = "select " + select + " from " + tableName + " where " + idColName + "=?" ; Object [ ] paramArray = new Object [ 1 ] ; paramArray [ 0 ] = formId ; Map retMap = microDao . querySingleObjJoinByCondition ( sql , paramArray ) ; // 返回的一定是个map
if ( retMap != null ) { return ( String ) retMap . get ( "dyna_data" ) ; } return null ;
|
public class KmeansCalculator { /** * ベース中心点配列とマージ対象中心点配列の各々のユークリッド距離を算出する 。
* @ param baseCentroids ベース中心点配列
* @ param targetCentroids マージ対象中心点配列
* @ param centroidNum 中心点数
* @ return ユークリッド距離リスト */
protected static List < CentroidMapping > calculateDistances ( double [ ] [ ] baseCentroids , double [ ] [ ] targetCentroids , int centroidNum ) { } }
|
// ベース中心点配列とマージ対象中心点配列の各々のユークリッド距離を算出
List < CentroidMapping > allDistance = new ArrayList < > ( ) ; for ( int baseIndex = 0 ; baseIndex < centroidNum ; baseIndex ++ ) { for ( int targetIndex = 0 ; targetIndex < centroidNum ; targetIndex ++ ) { CentroidMapping centroidMapping = new CentroidMapping ( ) ; centroidMapping . setBaseIndex ( baseIndex ) ; centroidMapping . setTargetIndex ( targetIndex ) ; double distance = MathUtils . distance ( baseCentroids [ baseIndex ] , targetCentroids [ targetIndex ] ) ; centroidMapping . setEuclideanDistance ( distance ) ; allDistance . add ( centroidMapping ) ; } } return allDistance ;
|
public class AOContainerItemStream { /** * Prints the Message Details to the xml output . */
public void xmlWriteOn ( FormattedWriter writer ) throws IOException { } }
|
if ( durablePseudoDestID != null ) { writer . newLine ( ) ; writer . taggedValue ( "durablePseudoDestID" , durablePseudoDestID ) ; } if ( durableSubName != null ) { writer . newLine ( ) ; writer . taggedValue ( "durableSubName" , durableSubName ) ; }
|
public class AmazonNeptuneClient { /** * Forces a failover for a DB cluster .
* A failover for a DB cluster promotes one of the Read Replicas ( read - only instances ) in the DB cluster to be the
* primary instance ( the cluster writer ) .
* Amazon Neptune will automatically fail over to a Read Replica , if one exists , when the primary instance fails .
* You can force a failover when you want to simulate a failure of a primary instance for testing . Because each
* instance in a DB cluster has its own endpoint address , you will need to clean up and re - establish any existing
* connections that use those endpoint addresses when the failover is complete .
* @ param failoverDBClusterRequest
* @ return Result of the FailoverDBCluster operation returned by the service .
* @ throws DBClusterNotFoundException
* < i > DBClusterIdentifier < / i > does not refer to an existing DB cluster .
* @ throws InvalidDBClusterStateException
* The DB cluster is not in a valid state .
* @ throws InvalidDBInstanceStateException
* The specified DB instance is not in the < i > available < / i > state .
* @ sample AmazonNeptune . FailoverDBCluster
* @ see < a href = " http : / / docs . aws . amazon . com / goto / WebAPI / neptune - 2014-10-31 / FailoverDBCluster " target = " _ top " > AWS API
* Documentation < / a > */
@ Override public DBCluster failoverDBCluster ( FailoverDBClusterRequest request ) { } }
|
request = beforeClientExecution ( request ) ; return executeFailoverDBCluster ( request ) ;
|
public class GraphHopper { /** * Reads the configuration from a CmdArgs object which can be manually filled , or via
* CmdArgs . read ( String [ ] args ) */
public GraphHopper init ( CmdArgs args ) { } }
|
args . merge ( CmdArgs . readFromSystemProperties ( ) ) ; if ( args . has ( "osmreader.osm" ) ) throw new IllegalArgumentException ( "Instead osmreader.osm use datareader.file, for other changes see core/files/changelog.txt" ) ; String tmpOsmFile = args . get ( "datareader.file" , "" ) ; if ( ! isEmpty ( tmpOsmFile ) ) dataReaderFile = tmpOsmFile ; String graphHopperFolder = args . get ( "graph.location" , "" ) ; if ( isEmpty ( graphHopperFolder ) && isEmpty ( ghLocation ) ) { if ( isEmpty ( dataReaderFile ) ) throw new IllegalArgumentException ( "If no graph.location is provided you need to specify an OSM file." ) ; graphHopperFolder = pruneFileEnd ( dataReaderFile ) + "-gh" ; } // graph
setGraphHopperLocation ( graphHopperFolder ) ; defaultSegmentSize = args . getInt ( "graph.dataaccess.segment_size" , defaultSegmentSize ) ; String graphDATypeStr = args . get ( "graph.dataaccess" , "RAM_STORE" ) ; dataAccessType = DAType . fromString ( graphDATypeStr ) ; sortGraph = args . getBool ( "graph.do_sort" , sortGraph ) ; removeZipped = args . getBool ( "graph.remove_zipped" , removeZipped ) ; int bytesForFlags = args . getInt ( "graph.bytes_for_flags" , 4 ) ; String flagEncodersStr = args . get ( "graph.flag_encoders" , "" ) ; if ( ! flagEncodersStr . isEmpty ( ) ) { EncodingManager . Builder emBuilder = EncodingManager . createBuilder ( flagEncoderFactory , flagEncodersStr , bytesForFlags ) ; emBuilder . setEnableInstructions ( args . getBool ( "datareader.instructions" , true ) ) ; emBuilder . setPreferredLanguage ( args . get ( "datareader.preferred_language" , "" ) ) ; setEncodingManager ( emBuilder . build ( ) ) ; } if ( args . get ( "graph.locktype" , "native" ) . equals ( "simple" ) ) lockFactory = new SimpleFSLockFactory ( ) ; else lockFactory = new NativeFSLockFactory ( ) ; // elevation
String eleProviderStr = toLowerCase ( args . get ( "graph.elevation.provider" , "noop" ) ) ; this . smoothElevation = args . getBool ( "graph.elevation.smoothing" , false ) ; // keep fallback until 0.8
boolean eleCalcMean = args . has ( "graph.elevation.calcmean" ) ? args . getBool ( "graph.elevation.calcmean" , false ) : args . getBool ( "graph.elevation.calc_mean" , false ) ; String cacheDirStr = args . get ( "graph.elevation.cache_dir" , "" ) ; if ( cacheDirStr . isEmpty ( ) ) cacheDirStr = args . get ( "graph.elevation.cachedir" , "" ) ; String baseURL = args . get ( "graph.elevation.base_url" , "" ) ; if ( baseURL . isEmpty ( ) ) args . get ( "graph.elevation.baseurl" , "" ) ; boolean removeTempElevationFiles = args . getBool ( "graph.elevation.cgiar.clear" , true ) ; removeTempElevationFiles = args . getBool ( "graph.elevation.clear" , removeTempElevationFiles ) ; DAType elevationDAType = DAType . fromString ( args . get ( "graph.elevation.dataaccess" , "MMAP" ) ) ; ElevationProvider tmpProvider = ElevationProvider . NOOP ; if ( eleProviderStr . equalsIgnoreCase ( "srtm" ) ) { tmpProvider = new SRTMProvider ( cacheDirStr ) ; } else if ( eleProviderStr . equalsIgnoreCase ( "cgiar" ) ) { tmpProvider = new CGIARProvider ( cacheDirStr ) ; } else if ( eleProviderStr . equalsIgnoreCase ( "gmted" ) ) { tmpProvider = new GMTEDProvider ( cacheDirStr ) ; } else if ( eleProviderStr . equalsIgnoreCase ( "srtmgl1" ) ) { tmpProvider = new SRTMGL1Provider ( cacheDirStr ) ; } else if ( eleProviderStr . equalsIgnoreCase ( "multi" ) ) { tmpProvider = new MultiSourceElevationProvider ( cacheDirStr ) ; } tmpProvider . setAutoRemoveTemporaryFiles ( removeTempElevationFiles ) ; tmpProvider . setCalcMean ( eleCalcMean ) ; if ( ! baseURL . isEmpty ( ) ) tmpProvider . setBaseURL ( baseURL ) ; tmpProvider . setDAType ( elevationDAType ) ; setElevationProvider ( tmpProvider ) ; // optimizable prepare
minNetworkSize = args . getInt ( "prepare.min_network_size" , minNetworkSize ) ; minOneWayNetworkSize = args . getInt ( "prepare.min_one_way_network_size" , minOneWayNetworkSize ) ; // prepare CH , LM , . . .
for ( RoutingAlgorithmFactoryDecorator decorator : algoDecorators ) { decorator . init ( args ) ; } // osm import
dataReaderWayPointMaxDistance = args . getDouble ( Routing . INIT_WAY_POINT_MAX_DISTANCE , dataReaderWayPointMaxDistance ) ; dataReaderWorkerThreads = args . getInt ( "datareader.worker_threads" , dataReaderWorkerThreads ) ; // index
preciseIndexResolution = args . getInt ( "index.high_resolution" , preciseIndexResolution ) ; maxRegionSearch = args . getInt ( "index.max_region_search" , maxRegionSearch ) ; // routing
maxVisitedNodes = args . getInt ( Routing . INIT_MAX_VISITED_NODES , Integer . MAX_VALUE ) ; maxRoundTripRetries = args . getInt ( RoundTrip . INIT_MAX_RETRIES , maxRoundTripRetries ) ; nonChMaxWaypointDistance = args . getInt ( Parameters . NON_CH . MAX_NON_CH_POINT_DISTANCE , Integer . MAX_VALUE ) ; return this ;
|
public class ReflectionUtil { /** * find and all non abstract classes that implement / extend
* baseClassOrInterface in the package packageName
* @ param packageName
* @ param baseClassOrInterface
* @ return */
@ SuppressWarnings ( "unchecked" ) public static < T > Set < Class < T > > loadClasses ( String packageName , Class < T > baseClassOrInterface ) { } }
|
Set < Class < T > > result = new HashSet < > ( ) ; try { Set < String > classNames = getClassNamesFromPackage ( packageName ) ; ClassLoader classLoader = Thread . currentThread ( ) . getContextClassLoader ( ) ; for ( String className : classNames ) { try { Class < ? > clazz = classLoader . loadClass ( packageName + "." + className ) ; if ( ! Modifier . isAbstract ( clazz . getModifiers ( ) ) && baseClassOrInterface . isAssignableFrom ( clazz ) ) { result . add ( ( Class < T > ) clazz ) ; } } catch ( Throwable e ) { logger . error ( e , e ) ; } } logger . info ( "Loaded " + result . size ( ) ) ; } catch ( Exception e ) { logger . error ( e , e ) ; } return result ;
|
public class TfIdf { /** * 平滑处理后的一系列文档的倒排词频
* @ param documentVocabularies 词表
* @ param < TERM > 词语类型
* @ return 一个词语 - > 倒排文档的Map */
public static < TERM > Map < TERM , Double > idf ( Iterable < Iterable < TERM > > documentVocabularies ) { } }
|
return idf ( documentVocabularies , true , true ) ;
|
public class DefaultMetaDataChecker { /** * 检查指定schema下是否包含指定的table
* @ throws IllegalMetaDataException */
@ Override public void check ( Connection conn , String scName , Collection < String > tbNames ) throws IllegalMetaDataException { } }
|
if ( scName == null ) { throw new IllegalArgumentException ( "[Check MetaData Failed] parameter 'scName' can't be null" ) ; } if ( tbNames == null || tbNames . isEmpty ( ) ) { throw new IllegalArgumentException ( "[Check MetaData Failed] parameter 'tbNames' can't be empty" ) ; } try { Set < String > set = getAllTables ( conn , scName ) ; if ( set == null || set . isEmpty ( ) ) { throw new IllegalMetaDataException ( "[Check MetaData Failed] Schema:'" + scName + "' has nothing tables. but in your configuration it requires table:" + DDRJSONUtils . toJSONString ( tbNames ) ) ; } for ( String tbName : tbNames ) { if ( ! set . contains ( tbName ) ) { throw new IllegalMetaDataException ( "[Check MetaData Failed] Schema:'" + scName + "' only has tables:" + DDRJSONUtils . toJSONString ( set ) + ", but in your configuration it requires table:" + tbName ) ; } } } catch ( SQLException e ) { throw new RuntimeException ( e ) ; }
|
public class DefaultTypeCache { /** * / * ( non - Javadoc )
* @ see org . apache . atlas . typesystem . types . cache . TypeCache # remove ( org .
* apache . atlas . typesystem . types . DataTypes . TypeCategory , java . lang . String ) */
@ Override public void remove ( TypeCategory typeCategory , String typeName ) throws AtlasException { } }
|
assertValidTypeCategory ( typeCategory ) ; remove ( typeName ) ;
|
public class CancelCertificateTransferRequestMarshaller { /** * Marshall the given parameter object . */
public void marshall ( CancelCertificateTransferRequest cancelCertificateTransferRequest , ProtocolMarshaller protocolMarshaller ) { } }
|
if ( cancelCertificateTransferRequest == null ) { throw new SdkClientException ( "Invalid argument passed to marshall(...)" ) ; } try { protocolMarshaller . marshall ( cancelCertificateTransferRequest . getCertificateId ( ) , CERTIFICATEID_BINDING ) ; } catch ( Exception e ) { throw new SdkClientException ( "Unable to marshall request to JSON: " + e . getMessage ( ) , e ) ; }
|
public class CRFppTxtModel { /** * 解析crf + + 生成的可可视txt文件
* @ return */
@ Override public CRFppTxtModel loadModel ( String modelPath ) throws Exception { } }
|
try ( InputStream is = new FileInputStream ( modelPath ) ) { loadModel ( new FileInputStream ( modelPath ) ) ; return this ; }
|
public class RoundedMoney { /** * ( non - Javadoc )
* @ see javax . money . MonetaryAmount # multiply ( Number ) */
@ Override public RoundedMoney multiply ( Number multiplicand ) { } }
|
BigDecimal bd = MoneyUtils . getBigDecimal ( multiplicand ) ; if ( isOne ( bd ) ) { return this ; } BigDecimal dec = number . multiply ( bd , Optional . ofNullable ( monetaryContext . get ( MathContext . class ) ) . orElse ( MathContext . DECIMAL64 ) ) ; return new RoundedMoney ( dec , currency , rounding ) . with ( rounding ) ;
|
public class WriterReaderPhaser { /** * Flip a phase in the { @ link WriterReaderPhaser } instance , { @ link WriterReaderPhaser # flipPhase ( ) }
* can only be called while holding the readerLock ( ) .
* { @ link WriterReaderPhaser # flipPhase ( ) } will return only after all writer critical sections ( protected by
* { @ link WriterReaderPhaser # writerCriticalSectionEnter ( ) } ( ) } and
* { @ link WriterReaderPhaser # writerCriticalSectionExit ( long ) } ( ) } ) that may have been in flight when the
* { @ link WriterReaderPhaser # flipPhase ( ) } call were made had completed .
* No actual writer critical section activity is required for { @ link WriterReaderPhaser # flipPhase ( ) } to
* succeed .
* However , { @ link WriterReaderPhaser # flipPhase ( ) } is lock - free with respect to calls to
* { @ link WriterReaderPhaser # writerCriticalSectionEnter ( ) } and
* { @ link WriterReaderPhaser # writerCriticalSectionExit ( long ) } . It may spin - wait for for active
* writer critical section code to complete .
* @ param yieldTimeNsec The amount of time ( in nanoseconds ) to sleep in each yield if yield loop is needed . */
public void flipPhase ( long yieldTimeNsec ) { } }
|
if ( ! readerLock . isHeldByCurrentThread ( ) ) { throw new IllegalStateException ( "flipPhase() can only be called while holding the readerLock()" ) ; } boolean nextPhaseIsEven = ( startEpoch < 0 ) ; // Current phase is odd . . .
long initialStartValue ; // First , clear currently unused [ next ] phase end epoch ( to proper initial value for phase ) :
if ( nextPhaseIsEven ) { initialStartValue = 0 ; evenEndEpochUpdater . lazySet ( this , initialStartValue ) ; } else { initialStartValue = Long . MIN_VALUE ; oddEndEpochUpdater . lazySet ( this , initialStartValue ) ; } // Next , reset start value , indicating new phase , and retain value at flip :
long startValueAtFlip = startEpochUpdater . getAndSet ( this , initialStartValue ) ; // Now , spin until previous phase end value catches up with start value at flip :
boolean caughtUp = false ; do { if ( nextPhaseIsEven ) { caughtUp = ( oddEndEpoch == startValueAtFlip ) ; } else { caughtUp = ( evenEndEpoch == startValueAtFlip ) ; } if ( ! caughtUp ) { if ( yieldTimeNsec == 0 ) { Thread . yield ( ) ; } else { try { TimeUnit . NANOSECONDS . sleep ( yieldTimeNsec ) ; } catch ( InterruptedException ex ) { } } } } while ( ! caughtUp ) ;
|
public class RangeVariable { /** * Only multiple EQUAL conditions are used
* @ param exprList list of expressions
* @ param index Index to use
* @ param isJoin whether a join or not */
void addIndexCondition ( Expression [ ] exprList , Index index , int colCount , boolean isJoin ) { } }
|
// VoltDB extension
if ( rangeIndex == index && isJoinIndex && ( ! isJoin ) && ( multiColumnCount > 0 ) && ( colCount == 0 ) ) { // This is one particular set of conditions which broke the classification of
// ON and WHERE clauses .
return ; } // End of VoltDB extension
rangeIndex = index ; isJoinIndex = isJoin ; for ( int i = 0 ; i < colCount ; i ++ ) { Expression e = exprList [ i ] ; indexEndCondition = ExpressionLogical . andExpressions ( indexEndCondition , e ) ; } if ( colCount == 1 ) { indexCondition = exprList [ 0 ] ; } else { findFirstExpressions = exprList ; isMultiFindFirst = true ; multiColumnCount = colCount ; }
|
public class Languages { /** * Returns the Java Locale for the Request & Response cycle . If the language
* specified in the Request / Response cycle can not be mapped to a Java
* Locale , the default language Locale is returned .
* @ param routeContext
* @ return a Java Locale */
public Locale getLocaleOrDefault ( RouteContext routeContext ) { } }
|
String language = getLanguageOrDefault ( routeContext ) ; return Locale . forLanguageTag ( language ) ;
|
public class RouteGuideClient { /** * Async client - streaming example . Sends { @ code numPoints } randomly chosen points from { @ code
* features } with a variable delay in between . Prints the statistics when they are sent from the
* server . */
public void recordRoute ( List < Feature > features , int numPoints ) throws InterruptedException { } }
|
info ( "*** RecordRoute" ) ; final CountDownLatch finishLatch = new CountDownLatch ( 1 ) ; StreamObserver < RouteSummary > responseObserver = new StreamObserver < RouteSummary > ( ) { @ Override public void onNext ( RouteSummary summary ) { info ( "Finished trip with {0} points. Passed {1} features. " + "Travelled {2} meters. It took {3} seconds." , summary . getPointCount ( ) , summary . getFeatureCount ( ) , summary . getDistance ( ) , summary . getElapsedTime ( ) ) ; if ( testHelper != null ) { testHelper . onMessage ( summary ) ; } } @ Override public void onError ( Throwable t ) { warning ( "RecordRoute Failed: {0}" , Status . fromThrowable ( t ) ) ; if ( testHelper != null ) { testHelper . onRpcError ( t ) ; } finishLatch . countDown ( ) ; } @ Override public void onCompleted ( ) { info ( "Finished RecordRoute" ) ; finishLatch . countDown ( ) ; } } ; StreamObserver < Point > requestObserver = asyncStub . recordRoute ( responseObserver ) ; try { // Send numPoints points randomly selected from the features list .
for ( int i = 0 ; i < numPoints ; ++ i ) { int index = random . nextInt ( features . size ( ) ) ; Point point = features . get ( index ) . getLocation ( ) ; info ( "Visiting point {0}, {1}" , RouteGuideUtil . getLatitude ( point ) , RouteGuideUtil . getLongitude ( point ) ) ; requestObserver . onNext ( point ) ; // Sleep for a bit before sending the next one .
Thread . sleep ( random . nextInt ( 1000 ) + 500 ) ; if ( finishLatch . getCount ( ) == 0 ) { // RPC completed or errored before we finished sending .
// Sending further requests won ' t error , but they will just be thrown away .
return ; } } } catch ( RuntimeException e ) { // Cancel RPC
requestObserver . onError ( e ) ; throw e ; } // Mark the end of requests
requestObserver . onCompleted ( ) ; // Receiving happens asynchronously
if ( ! finishLatch . await ( 1 , TimeUnit . MINUTES ) ) { warning ( "recordRoute can not finish within 1 minutes" ) ; }
|
public class NameOpValue { /** * Adds an array of values to the list of values .
* Each element in the array is converted into a
* Value object and inserted as a separate value
* into the list of values .
* @ param strValues the array of values to add . */
public void add ( String [ ] strValues ) { } }
|
if ( strValues == null ) return ; if ( values == null ) values = new LinkedList ( ) ; for ( int i = 0 ; i < strValues . length ; i ++ ) { values . add ( new Value ( strValues [ i ] ) ) ; }
|
public class Slice { /** * Returns a copy of this buffer ' s sub - region . Modifying the content of
* the returned buffer or this buffer does not affect each other at all . */
public Slice copySlice ( int index , int length ) { } }
|
checkPositionIndexes ( index , index + length , this . length ) ; index += offset ; byte [ ] copiedArray = new byte [ length ] ; System . arraycopy ( data , index , copiedArray , 0 , length ) ; return new Slice ( copiedArray ) ;
|
public class WicketImageExtensions { /** * Gets a non caching image from the given wicketId , contentType and the byte array data .
* @ param wicketId
* the id from the image for the html template .
* @ param contentType
* the content type of the image .
* @ param data
* the data for the image as an byte array .
* @ return the non caching image */
public static NonCachingImage getNonCachingImage ( final String wicketId , final String contentType , final byte [ ] data ) { } }
|
return new NonCachingImage ( wicketId , new DatabaseImageResource ( contentType , data ) ) ;
|
public class MariaDbDatabaseMetaData { /** * Retrieves a description of the stored procedures available in the given catalog . Only procedure
* descriptions matching the schema and procedure name criteria are returned . They are ordered
* by
* < code > PROCEDURE _ CAT < / code > ,
* < code > PROCEDURE _ SCHEM < / code > , < code > PROCEDURE _ NAME < / code > and < code > SPECIFIC _ NAME < / code > .
* < P > Each procedure description has the the following columns : < / p >
* < OL > < LI > < B > PROCEDURE _ CAT < / B >
* String { @ code = > } procedure catalog ( may be
* < code > null < / code > ) < LI > < B > PROCEDURE _ SCHEM < / B > String { @ code = > } procedure schema ( may be
* < code > null < / code > ) < LI > < B > PROCEDURE _ NAME < / B > String
* { @ code = > } procedure name < LI > reserved for future use < LI > reserved for future use < LI >
* reserved for future use < LI > < B > REMARKS < / B > String { @ code = > } explanatory comment on the
* procedure < LI > < B > PROCEDURE _ TYPE < / B > short { @ code = > } kind of procedure : < UL > < LI >
* procedureResultUnknown - Cannot determine if a return value will be returned < LI >
* procedureNoResult - Does not return a return value < LI > procedureReturnsResult - Returns a
* return value < / UL > < LI > < B > SPECIFIC _ NAME < / B > String { @ code = > } The name which uniquely
* identifies this procedure within its schema .
* < / OL >
* A user may not have permissions to execute any of the procedures that are returned by
* < code > getProcedures < / code >
* @ param catalog a catalog name ; must match the catalog name as it is stored in the
* database ; " " retrieves those without a catalog ;
* < code > null < / code > means that the catalog name should not be used
* to narrow the search
* @ param schemaPattern a schema name pattern ; must match the schema name as it is stored
* in the database ; " " retrieves those without a schema ;
* < code > null < / code > means that the schema name should not be used to
* narrow the search
* @ param procedureNamePattern a procedure name pattern ; must match the procedure name as it is
* stored in the database
* @ return < code > ResultSet < / code > - each row is a procedure description
* @ throws SQLException if a database access error occurs
* @ see # getSearchStringEscape */
public ResultSet getProcedures ( String catalog , String schemaPattern , String procedureNamePattern ) throws SQLException { } }
|
String sql = "SELECT ROUTINE_SCHEMA PROCEDURE_CAT,NULL PROCEDURE_SCHEM, ROUTINE_NAME PROCEDURE_NAME," + " NULL RESERVED1, NULL RESERVED2, NULL RESERVED3," + " CASE ROUTINE_TYPE " + " WHEN 'FUNCTION' THEN " + procedureReturnsResult + " WHEN 'PROCEDURE' THEN " + procedureNoResult + " ELSE " + procedureResultUnknown + " END PROCEDURE_TYPE," + " ROUTINE_COMMENT REMARKS, SPECIFIC_NAME " + " FROM INFORMATION_SCHEMA.ROUTINES " + " WHERE " + catalogCond ( "ROUTINE_SCHEMA" , catalog ) + " AND " + patternCond ( "ROUTINE_NAME" , procedureNamePattern ) + "/* AND ROUTINE_TYPE='PROCEDURE' */" ; return executeQuery ( sql ) ;
|
public class CharEscaperBuilder { /** * Add a new mapping from an index to an object to the escaping . */
@ CanIgnoreReturnValue public CharEscaperBuilder addEscape ( char c , String r ) { } }
|
map . put ( c , checkNotNull ( r ) ) ; if ( c > max ) { max = c ; } return this ;
|
public class SequenceLabelFactory { /** * Constructs a { @ link SequenceLabel } as a String with corresponding offsets
* and length from which to calculate start and end position of the Name .
* @ param seqString
* string to be added to a Sequence object
* @ param seqType
* the type of the Sequence
* @ param offset
* the starting offset of the Sequence
* @ param length
* of the string
* @ return a new Sequence object */
public final SequenceLabel createSequence ( final String seqString , final String seqType , final int offset , final int length ) { } }
|
final SequenceLabel sequence = new SequenceLabel ( ) ; sequence . setValue ( seqString ) ; sequence . setType ( seqType ) ; sequence . setStartOffset ( offset ) ; sequence . setSequenceLength ( length ) ; return sequence ;
|
public class GVRIndexBuffer { /** * Updates the indices in the index buffer from a Java IntBuffer .
* All of the entries of the input int buffer are copied into
* the storage for the index buffer . The new indices must be the
* same size as the old indices - the index buffer size cannot be changed .
* @ param data char array containing the new values
* @ throws IllegalArgumentException if int buffer is wrong size */
public void setIntVec ( IntBuffer data ) { } }
|
if ( data == null ) { throw new IllegalArgumentException ( "Input buffer for indices cannot be null" ) ; } if ( getIndexSize ( ) != 4 ) { throw new UnsupportedOperationException ( "Cannot update integer indices with short array" ) ; } if ( data . isDirect ( ) ) { if ( ! NativeIndexBuffer . setIntVec ( getNative ( ) , data ) ) { throw new UnsupportedOperationException ( "Input array is wrong size" ) ; } } else if ( data . hasArray ( ) ) { if ( ! NativeIndexBuffer . setIntArray ( getNative ( ) , data . array ( ) ) ) { throw new IllegalArgumentException ( "Data array incompatible with index buffer" ) ; } } else { throw new UnsupportedOperationException ( "IntBuffer type not supported. Must be direct or have backing array" ) ; }
|
public class WhiteboxImpl { /** * Check if all arguments are of the same type .
* @ param arguments the arguments
* @ return true , if successful */
static boolean areAllArgumentsOfSameType ( Object [ ] arguments ) { } }
|
if ( arguments == null || arguments . length <= 1 ) { return true ; } // Handle null values
int index = 0 ; Object object = null ; while ( object == null && index < arguments . length ) { object = arguments [ index ++ ] ; } if ( object == null ) { return true ; } // End of handling null values
final Class < ? > firstArgumentType = getType ( object ) ; for ( int i = index ; i < arguments . length ; i ++ ) { final Object argument = arguments [ i ] ; if ( argument != null && ! getType ( argument ) . isAssignableFrom ( firstArgumentType ) ) { return false ; } } return true ;
|
public class MessageInfo { /** * Parse a subset of the type comment ; valid matches are simple types , compound types ,
* union types and custom types . */
MessageType parseAlternative ( String text ) { } }
|
// try with custom types
if ( text . charAt ( 0 ) == '\'' ) { int end = text . indexOf ( '\'' , 1 ) ; return new MessageType . CustomType ( text . substring ( 1 , end ) ) ; } // try with simple types
for ( SimpleType st : SimpleType . values ( ) ) { if ( text . equals ( st . kindName ( ) ) ) { return st ; } } // try with compound types
for ( CompoundType . Kind ck : CompoundType . Kind . values ( ) ) { if ( text . startsWith ( ck . kindName ) ) { MessageType elemtype = parseAlternative ( text . substring ( ck . kindName . length ( ) + 1 ) . trim ( ) ) ; return new CompoundType ( ck , elemtype ) ; } } // try with union types
for ( UnionType . Kind uk : UnionType . Kind . values ( ) ) { if ( text . startsWith ( uk . kindName ) ) { return new UnionType ( uk ) ; } } // no match - report a warning
System . err . println ( "WARNING - unrecognized type: " + text ) ; return SimpleType . UNKNOWN ;
|
public class WriterOutputStream { public void write ( byte [ ] b ) throws IOException { } }
|
if ( _encoding == null ) _writer . write ( new String ( b ) ) ; else _writer . write ( new String ( b , _encoding ) ) ;
|
public class SimplifierExtractor { /** * Add a single extractor
* @ param pName name of the extractor
* @ param pExtractor the extractor itself */
protected final void addExtractor ( String pName , AttributeExtractor < T > pExtractor ) { } }
|
extractorMap . put ( pName , pExtractor ) ;
|
public class TAudioFileReader { /** * Get an AudioFileFormat object for a URL . This method calls
* getAudioFileFormat ( InputStream , long ) . Subclasses should not override
* this method unless there are really severe reasons . Normally , it is
* sufficient to implement getAudioFileFormat ( InputStream , long ) .
* @ param urlthe URL to read from .
* @ returnan AudioFileFormat instance containing information from the
* header of the URL passed in .
* @ throws javax . sound . sampled . UnsupportedAudioFileException
* @ throws java . io . IOException */
@ Override public AudioFileFormat getAudioFileFormat ( URL url ) throws UnsupportedAudioFileException , IOException { } }
|
LOG . log ( Level . FINE , "TAudioFileReader.getAudioFileFormat(URL): begin (class: {0})" , getClass ( ) . getSimpleName ( ) ) ; long lFileLengthInBytes = getDataLength ( url ) ; AudioFileFormat audioFileFormat ; try ( InputStream inputStream = url . openStream ( ) ) { audioFileFormat = getAudioFileFormat ( inputStream , lFileLengthInBytes ) ; } LOG . log ( Level . FINE , "TAudioFileReader.getAudioFileFormat(URL): end" ) ; return audioFileFormat ;
|
public class FaceletCompositionContextImpl { /** * Mark a component to be deleted from the tree . The component to be deleted is addded on the
* current level . This is done from ComponentSupport . markForDeletion
* @ param id
* @ param component the component marked for deletion . */
private void markComponentForDeletion ( String id , UIComponent component ) { } }
|
_componentsMarkedForDeletion . get ( _deletionLevel ) . put ( id , component ) ;
|
public class VmwareIaasHandler { /** * ( non - Javadoc )
* @ see net . roboconf . target . api . TargetHandler
* # terminateMachine ( net . roboconf . target . api . TargetHandlerParameters , java . lang . String ) */
@ Override public void terminateMachine ( TargetHandlerParameters parameters , String machineId ) throws TargetException { } }
|
try { cancelMachineConfigurator ( machineId ) ; final ServiceInstance vmwareServiceInstance = getServiceInstance ( parameters . getTargetProperties ( ) ) ; VirtualMachine vm = getVirtualMachine ( vmwareServiceInstance , machineId ) ; if ( vm == null ) throw new TargetException ( "Error vm: " + machineId + " was not found" ) ; Task task = vm . powerOffVM_Task ( ) ; try { if ( ! ( task . waitForTask ( ) ) . equals ( Task . SUCCESS ) ) throw new TargetException ( "Error while trying to stop vm: " + machineId ) ; } catch ( InterruptedException ignore ) { /* ignore */
} task = vm . destroy_Task ( ) ; try { if ( ! ( task . waitForTask ( ) ) . equals ( Task . SUCCESS ) ) throw new TargetException ( "Error while trying to remove vm: " + machineId ) ; } catch ( InterruptedException ignore ) { /* ignore */
} } catch ( Exception e ) { throw new TargetException ( e ) ; }
|
public class VisTextArea { /** * Updates the current line , checking the cursor position in the text * */
void updateCurrentLine ( ) { } }
|
int index = calculateCurrentLineIndex ( cursor ) ; int line = index / 2 ; // Special case when cursor moves to the beginning of the line from the end of another and a word
// wider than the box
if ( index % 2 == 0 || index + 1 >= linesBreak . size || cursor != linesBreak . items [ index ] || linesBreak . items [ index + 1 ] != linesBreak . items [ index ] ) { if ( line < linesBreak . size / 2 || text . length ( ) == 0 || text . charAt ( text . length ( ) - 1 ) == ENTER_ANDROID || text . charAt ( text . length ( ) - 1 ) == ENTER_DESKTOP ) { cursorLine = line ; } }
|
public class ExpressRoutePortsLocationsInner { /** * Retrieves a single ExpressRoutePort peering location , including the list of available bandwidths available at said peering location .
* @ param locationName Name of the requested ExpressRoutePort peering location .
* @ param serviceCallback the async ServiceCallback to handle successful and failed responses .
* @ throws IllegalArgumentException thrown if parameters fail the validation
* @ return the { @ link ServiceFuture } object */
public ServiceFuture < ExpressRoutePortsLocationInner > getAsync ( String locationName , final ServiceCallback < ExpressRoutePortsLocationInner > serviceCallback ) { } }
|
return ServiceFuture . fromResponse ( getWithServiceResponseAsync ( locationName ) , serviceCallback ) ;
|
public class DefaultFaceletFactory { /** * Resolves a path based on the passed URL . If the path starts with ' / ' , then resolve the path against
* { @ link javax . faces . context . ExternalContext # getResource ( java . lang . String )
* javax . faces . context . ExternalContext # getResource ( java . lang . String ) } . Otherwise create a new URL via
* { @ link URL # URL ( java . net . URL , java . lang . String ) URL ( URL , String ) } .
* @ param source
* base to resolve from
* @ param path
* relative path to the source
* @ return resolved URL
* @ throws IOException */
public URL resolveURL ( FacesContext context , URL source , String path ) throws IOException { } }
|
if ( path . startsWith ( "/" ) ) { context . getAttributes ( ) . put ( LAST_RESOURCE_RESOLVED , null ) ; URL url = resolveURL ( context , path ) ; if ( url == null ) { throw new FileNotFoundException ( path + " Not Found in ExternalContext as a Resource" ) ; } return url ; } else { return new URL ( source , path ) ; }
|
public class AWSSecurityHubClient { /** * Lists all findings - generating solutions ( products ) whose findings you ' ve subscribed to receive in Security Hub .
* @ param listEnabledProductsForImportRequest
* @ return Result of the ListEnabledProductsForImport operation returned by the service .
* @ throws InternalException
* Internal server error .
* @ throws LimitExceededException
* The request was rejected because it attempted to create resources beyond the current AWS account limits .
* The error code describes the limit exceeded .
* @ throws InvalidAccessException
* AWS Security Hub is not enabled for the account used to make this request .
* @ sample AWSSecurityHub . ListEnabledProductsForImport
* @ see < a href = " http : / / docs . aws . amazon . com / goto / WebAPI / securityhub - 2018-10-26 / ListEnabledProductsForImport "
* target = " _ top " > AWS API Documentation < / a > */
@ Override public ListEnabledProductsForImportResult listEnabledProductsForImport ( ListEnabledProductsForImportRequest request ) { } }
|
request = beforeClientExecution ( request ) ; return executeListEnabledProductsForImport ( request ) ;
|
public class TypeBindings { /** * / * Accessors */
public ResolvedType findBoundType ( String name ) { } }
|
for ( int i = 0 , len = _names . length ; i < len ; ++ i ) { if ( name . equals ( _names [ i ] ) ) { return _types [ i ] ; } } return null ;
|
public class RestfulApiClient { /** * function to dispatch the request and pass back the response . */
protected T sendAndReturn ( final HttpUriRequest request ) throws IOException { } }
|
try ( CloseableHttpClient client = HttpClients . createDefault ( ) ) { return this . parseResponse ( client . execute ( request ) ) ; }
|
public class AptUtil { /** * Test if the given type is an internal type .
* @ param typeUtils
* @ param type
* @ return True if the type is an internal type , false otherwise .
* @ author vvakame */
public static boolean isInternalType ( Types typeUtils , TypeMirror type ) { } }
|
Element element = ( ( TypeElement ) typeUtils . asElement ( type ) ) . getEnclosingElement ( ) ; return element . getKind ( ) != ElementKind . PACKAGE ;
|
public class AnimaQuery { /** * Build a insert statement .
* @ param model model instance
* @ param < S >
* @ return insert sql */
private < S extends Model > String buildInsertSQL ( S model , List < Object > columnValues ) { } }
|
SQLParams sqlParams = SQLParams . builder ( ) . model ( model ) . columnValues ( columnValues ) . modelClass ( this . modelClass ) . tableName ( this . tableName ) . pkName ( this . primaryKeyColumn ) . build ( ) ; return Anima . of ( ) . dialect ( ) . insert ( sqlParams ) ;
|
public class AtomicMarkableReference { /** * Returns the current values of both the reference and the mark .
* Typical usage is { @ code boolean [ 1 ] holder ; ref = v . get ( holder ) ; } .
* @ param markHolder an array of size of at least one . On return ,
* { @ code markHolder [ 0 ] } will hold the value of the mark .
* @ return the current value of the reference */
public V get ( boolean [ ] markHolder ) { } }
|
Pair < V > pair = this . pair ; markHolder [ 0 ] = pair . mark ; return pair . reference ;
|
public class RestClientUtil { /** * 发送es restful sql请求下一页数据 , 获取返回值 , 返回值类型由beanType决定
* https : / / www . elastic . co / guide / en / elasticsearch / reference / current / sql - rest . html
* @ param beanType
* @ param cursor
* @ param metas
* @ param < T >
* @ return
* @ throws ElasticSearchException */
public < T > SQLResult < T > fetchQueryByCursor ( Class < T > beanType , String cursor , ColumnMeta [ ] metas ) throws ElasticSearchException { } }
|
if ( cursor == null ) { return null ; } SQLRestResponse result = this . client . executeRequest ( "/_xpack/sql" , new StringBuilder ( ) . append ( "{\"cursor\": \"" ) . append ( cursor ) . append ( "\"}" ) . toString ( ) , new SQLRestResponseHandler ( ) ) ; SQLResult < T > datas = ResultUtil . buildFetchSQLResult ( result , beanType , metas ) ; datas . setClientInterface ( this ) ; return datas ;
|
public class StorableIndex { /** * Parses an index descriptor and returns an index object .
* @ param desc name descriptor , as created by { @ link # getNameDescriptor }
* @ param info info on storable type
* @ return index represented by descriptor
* @ throws IllegalArgumentException if error in descriptor syntax or if it
* refers to unknown properties */
@ SuppressWarnings ( "unchecked" ) public static < S extends Storable > StorableIndex < S > parseNameDescriptor ( String desc , StorableInfo < S > info ) throws IllegalArgumentException { } }
|
String name = info . getStorableType ( ) . getName ( ) ; if ( ! desc . startsWith ( name ) ) { throw new IllegalArgumentException ( "Descriptor starts with wrong type name: \"" + desc + "\", \"" + name + '"' ) ; } Map < String , ? extends StorableProperty < S > > allProperties = info . getAllProperties ( ) ; List < StorableProperty < S > > properties = new ArrayList < StorableProperty < S > > ( ) ; List < Direction > directions = new ArrayList < Direction > ( ) ; boolean unique ; try { int pos = name . length ( ) ; if ( desc . charAt ( pos ++ ) != '~' ) { throw new IllegalArgumentException ( "Invalid syntax" ) ; } { int pos2 = nextSep ( desc , pos ) ; String attr = desc . substring ( pos , pos2 ) ; if ( attr . equals ( "U" ) ) { unique = true ; } else if ( attr . equals ( "N" ) ) { unique = false ; } else { throw new IllegalArgumentException ( "Unknown attribute" ) ; } pos = pos2 ; } while ( pos < desc . length ( ) ) { char sign = desc . charAt ( pos ++ ) ; if ( sign == '+' ) { directions . add ( Direction . ASCENDING ) ; } else if ( sign == '-' ) { directions . add ( Direction . DESCENDING ) ; } else if ( sign == '~' ) { directions . add ( Direction . UNSPECIFIED ) ; } else { throw new IllegalArgumentException ( "Unknown property direction" ) ; } int pos2 = nextSep ( desc , pos ) ; String propertyName = desc . substring ( pos , pos2 ) ; StorableProperty < S > property = allProperties . get ( propertyName ) ; if ( property == null ) { throw new IllegalArgumentException ( "Unknown property: " + propertyName ) ; } properties . add ( property ) ; pos = pos2 ; } } catch ( IndexOutOfBoundsException e ) { throw new IllegalArgumentException ( "Invalid syntax" ) ; } int size = properties . size ( ) ; if ( size == 0 || size != directions . size ( ) ) { throw new IllegalArgumentException ( "No properties specified" ) ; } StorableIndex < S > index = new StorableIndex < S > ( properties . toArray ( new StorableProperty [ size ] ) , directions . 
toArray ( new Direction [ size ] ) ) ; return index . unique ( unique ) ;
|
public class EmptyIterator { /** * Gets a singleton instance of the empty iterator .
* @ param < E > The type of the objects ( not ) returned by the iterator .
* @ return An instance of the iterator . */
public static < E > EmptyIterator < E > get ( ) { } }
|
@ SuppressWarnings ( "unchecked" ) EmptyIterator < E > iter = ( EmptyIterator < E > ) INSTANCE ; return iter ;
|
public class Scs_counts { /** * Column counts of LL ' = A or LL ' = A ' A , given parent & postordering
* @ param A
* column - compressed matrix
* @ param parent
* elimination tree of A
* @ param post
* postordering of parent
* @ param ata
* analyze A if false , A ' A otherwise
* @ return column counts of LL ' = A or LL ' = A ' A , null on error */
public static int [ ] cs_counts ( Scs A , int [ ] parent , int [ ] post , boolean ata ) { } }
|
int i , j , k , n , m , J , s , p , q , ATp [ ] , ATi [ ] , maxfirst [ ] , prevleaf [ ] , ancestor [ ] , colcount [ ] , w [ ] , first [ ] , delta [ ] ; int [ ] head = null , next = null ; int [ ] jleaf = new int [ 1 ] ; int head_offset = 0 , next_offset = 0 ; Scs AT ; if ( ! Scs_util . CS_CSC ( A ) || parent == null || post == null ) return ( null ) ; /* check inputs */
m = A . m ; n = A . n ; s = 4 * n + ( ata ? ( n + m + 1 ) : 0 ) ; delta = colcount = new int [ n ] ; /* allocate result */
w = new int [ s ] ; /* get workspace */
AT = Scs_transpose . cs_transpose ( A , false ) ; /* AT = A ' */
ancestor = w ; maxfirst = w ; int maxfirst_offset = n ; prevleaf = w ; int prevleaf_offset = 2 * n ; first = w ; int first_offset = 3 * n ; for ( k = 0 ; k < s ; k ++ ) w [ k ] = - 1 ; /* clear workspace w [ 0 . f . s - 1] */
for ( k = 0 ; k < n ; k ++ ) /* find first [ j ] */
{ j = post [ k ] ; delta [ j ] = ( first [ first_offset + j ] == - 1 ) ? 1 : 0 ; /* delta [ j ] = 1 if j is a leaf */
for ( ; j != - 1 && first [ first_offset + j ] == - 1 ; j = parent [ j ] ) first [ first_offset + j ] = k ; } ATp = AT . p ; ATi = AT . i ; if ( ata ) { int [ ] offsets = init_ata ( AT , post , w ) ; head = w ; head_offset = offsets [ 0 ] ; next = w ; next_offset = offsets [ 1 ] ; } for ( i = 0 ; i < n ; i ++ ) ancestor [ i ] = i ; /* each node in its own set */
for ( k = 0 ; k < n ; k ++ ) { j = post [ k ] ; /* j is the kth node in postordered etree */
if ( parent [ j ] != - 1 ) delta [ parent [ j ] ] -- ; /* j is not a root */
for ( J = HEAS ( k , j , head , head_offset , ata ) ; J != - 1 ; J = NEXT ( J , next , next_offset , ata ) ) /* J = j for LL ' = A case */
{ for ( p = ATp [ J ] ; p < ATp [ J + 1 ] ; p ++ ) { i = ATi [ p ] ; q = Scs_leaf . cs_leaf ( i , j , first , first_offset , maxfirst , maxfirst_offset , prevleaf , prevleaf_offset , ancestor , 0 , jleaf ) ; if ( jleaf [ 0 ] >= 1 ) delta [ j ] ++ ; /* A ( i , j ) is in skeleton */
if ( jleaf [ 0 ] == 2 ) delta [ q ] -- ; /* account for overlap in q */
} } if ( parent [ j ] != - 1 ) ancestor [ j ] = parent [ j ] ; } for ( j = 0 ; j < n ; j ++ ) /* sum up delta ' s of each child */
{ if ( parent [ j ] != - 1 ) colcount [ parent [ j ] ] += colcount [ j ] ; } return colcount ;
|
public class DOMDiffHandler { /** * Start the diff .
* This writes out the start of a & lt ; diff & gt ; node .
* @ param oldJar ignored
* @ param newJar ignored
* @ throws DiffException when there is an underlying exception , e . g .
* writing to a file caused an IOException */
public void startDiff ( String oldJar , String newJar ) throws DiffException { } }
|
Element tmp = doc . createElementNS ( XML_URI , "diff" ) ; tmp . setAttribute ( "old" , oldJar ) ; tmp . setAttribute ( "new" , newJar ) ; doc . appendChild ( tmp ) ; currentNode = tmp ;
|
public class Hash { /** * Method getHashText .
* @ param plainText
* @ param algorithm The algorithm to use like MD2 , MD5 , SHA - 1 , etc .
* @ return String
* @ throws NoSuchAlgorithmException */
public static String getHashText ( String plainText , String algorithm ) throws NoSuchAlgorithmException { } }
|
MessageDigest mdAlgorithm = MessageDigest . getInstance ( algorithm ) ; mdAlgorithm . update ( plainText . getBytes ( ) ) ; byte [ ] digest = mdAlgorithm . digest ( ) ; StringBuffer hexString = new StringBuffer ( ) ; for ( int i = 0 ; i < digest . length ; i ++ ) { plainText = Integer . toHexString ( 0xFF & digest [ i ] ) ; if ( plainText . length ( ) < 2 ) { plainText = "0" + plainText ; } hexString . append ( plainText ) ; } return hexString . toString ( ) ;
|
public class ServletContextLogger { /** * from interface LogChute */
public void init ( RuntimeServices rsvc ) { } }
|
// if we weren ' t constructed with a servlet context , try to obtain
// one via the application context
if ( _sctx == null ) { // first look for the servlet context directly
_sctx = ( ServletContext ) rsvc . getApplicationAttribute ( "ServletContext" ) ; } // if that didn ' t work , look for an application
if ( _sctx == null ) { // first look for an application
Application app = ( Application ) rsvc . getApplicationAttribute ( Application . VELOCITY_ATTR_KEY ) ; if ( app != null ) { _sctx = app . getServletContext ( ) ; } } // if we still don ' t have one , complain
if ( _sctx == null ) { rsvc . getLog ( ) . warn ( "ServletContextLogger: servlet context was not supplied. A user " + "of the servlet context logger must call " + "Velocity.setApplicationAttribute(\"ServletContext\", " + "getServletContext())." ) ; }
|
public class OrderBook { /** * Replace the amount for limitOrder ' s price in the provided list . */
private void update ( List < LimitOrder > asks , LimitOrder limitOrder ) { } }
|
int idx = Collections . binarySearch ( asks , limitOrder ) ; if ( idx >= 0 ) { asks . remove ( idx ) ; asks . add ( idx , limitOrder ) ; } else { asks . add ( - idx - 1 , limitOrder ) ; }
|
public class JoltUtils { /** * Navigate a JSON tree ( made up of Maps and Lists ) to " lookup " the value
* at a particular path .
* Example : given Json
* Object json =
* " b " : [ " x " , " y " , " z " ]
* navigate ( json , " a " , " b " , 0 ) will return " x " .
* It will traverse down the nested " a " and return the zeroth item of the " b " array .
* You will either get your data , or null .
* It should never throw an Exception ; even if
* - you ask to index an array with a negative number
* - you ask to index an array wiht a number bigger than the array size
* - you ask to index a map that does not exist
* - your input data has objects in it other than Map , List , String , Number .
* @ param source the source JSON object ( Map , List , String , Number )
* @ param paths varargs path you want to travel
* @ return the object of Type < T > at final destination */
public static < T > T navigate ( final Object source , final Object ... paths ) { } }
|
Object destination = source ; for ( Object path : paths ) { if ( path == null || destination == null ) { return null ; } if ( destination instanceof Map ) { destination = ( ( Map ) destination ) . get ( path ) ; } else if ( destination instanceof List ) { if ( ! ( path instanceof Integer ) ) { return null ; } List destList = ( List ) destination ; int pathInt = ( Integer ) path ; if ( pathInt < 0 || pathInt >= destList . size ( ) ) { return null ; } destination = destList . get ( pathInt ) ; } else { // the input at this level is not a Map or List
// so return null
return null ; } } return cast ( destination ) ;
|
public class CaseRuntimeDataServiceImpl { /** * Case instance queries */
@ Override public Collection < org . jbpm . services . api . model . NodeInstanceDesc > getActiveNodesForCase ( String caseId , QueryContext queryContext ) { } }
|
Map < String , Object > params = new HashMap < String , Object > ( ) ; params . put ( "caseId" , caseId + "%" ) ; applyQueryContext ( params , queryContext ) ; applyDeploymentFilter ( params ) ; List < org . jbpm . services . api . model . NodeInstanceDesc > nodeInstances = commandService . execute ( new QueryNameCommand < List < org . jbpm . services . api . model . NodeInstanceDesc > > ( "getActiveNodesForCase" , params ) ) ; return nodeInstances ;
|
public class LocalDateUtil {
    /**
     * Returns the first day of the month containing the given date.
     * @param localDate the reference date
     * @return a {@link LocalDate} for day 1 of the same year and month
     */
    public static LocalDate firstDay(LocalDate localDate) {
        // withDayOfMonth(1) is the idiomatic java.time form of
        // LocalDate.of(year, month, 1) and can never be out of range.
        return localDate.withDayOfMonth(1);
    }
}
|
public class StandaloneProcessRunnerProcess { /** * This is a special method that runs some code when this screen is opened as a task . */
public void run ( ) { } }
|
Map < String , Object > properties = this . getProperties ( ) ; String process = this . getProperty ( STANDALONE_PROCESS ) ; properties . put ( DBParams . PROCESS , process ) ; Environment env = new Environment ( properties ) ; MainApplication app = new MainApplication ( env , properties , null ) ; ProcessRunnerTask task = new ProcessRunnerTask ( app , null , properties ) ; task . run ( ) ; // Since I already have a processor thread ( Note : This method will free when done )
env . freeIfDone ( ) ;
|
public class TransformProcess { /** * Transforms a sequence
* of strings in to a sequence of writables
* ( very similar to { @ link # transformRawStringsToInput ( String . . . ) }
* for sequences
* @ param sequence the sequence to transform
* @ return the transformed input */
public List < List < Writable > > transformRawStringsToInputSequence ( List < List < String > > sequence ) { } }
|
List < List < Writable > > ret = new ArrayList < > ( ) ; for ( List < String > input : sequence ) ret . add ( transformRawStringsToInputList ( input ) ) ; return ret ;
|
public class ArchivalUrlContextResultURIConverterFactory { /** * / * ( non - Javadoc )
* @ see org . archive . wayback . replay . html . ContextResultURIConverterFactory # getContextConverter ( java . lang . String ) */
@ Override public ResultURIConverter getContextConverter ( String flags ) { } }
|
if ( flags == null ) { return converter ; } return new ArchivalUrlSpecialContextResultURIConverter ( flags ) ;
|
public class AbstractSurfaceDataType { /** * Gets the value of the genericApplicationPropertyOfSurfaceData property .
* This accessor method returns a reference to the live list ,
* not a snapshot . Therefore any modification you make to the
* returned list will be present inside the JAXB object .
* This is why there is not a < CODE > set < / CODE > method for the genericApplicationPropertyOfSurfaceData property .
* For example , to add a new item , do as follows :
* < pre >
* get _ GenericApplicationPropertyOfSurfaceData ( ) . add ( newItem ) ;
* < / pre >
* Objects of the following type ( s ) are allowed in the list
* { @ link JAXBElement } { @ code < } { @ link Object } { @ code > }
* { @ link JAXBElement } { @ code < } { @ link Object } { @ code > } */
public List < JAXBElement < Object > > get_GenericApplicationPropertyOfSurfaceData ( ) { } }
|
if ( _GenericApplicationPropertyOfSurfaceData == null ) { _GenericApplicationPropertyOfSurfaceData = new ArrayList < JAXBElement < Object > > ( ) ; } return this . _GenericApplicationPropertyOfSurfaceData ;
|
public class ESTemplate {
    /**
     * Deletes data by primary key.
     * @param mapping the mapping configuration object
     * @param pkVal the primary-key value
     * @param esFieldData field data applied to matched documents when no _id mapping exists
     */
    public void delete(ESMapping mapping, Object pkVal, Map<String, Object> esFieldData) {
        if (mapping.get_id() != null) {
            // The ES document id equals the PK, so a direct bulk delete is possible.
            getBulk().add(transportClient.prepareDelete(mapping.get_index(), mapping.get_type(), pkVal.toString()));
            commitBulk();
        } else {
            // No _id mapping: locate documents by the PK field and update them with
            // esFieldData instead of deleting — presumably nulled-out fields; confirm
            // against the caller that builds esFieldData.
            SearchResponse response = transportClient.prepareSearch(mapping.get_index())
                .setTypes(mapping.get_type())
                .setQuery(QueryBuilders.termQuery(mapping.getPk(), pkVal))
                .setSize(10000)
                .get();
            for (SearchHit hit : response.getHits()) {
                getBulk().add(transportClient.prepareUpdate(mapping.get_index(), mapping.get_type(), hit.getId())
                    .setDoc(esFieldData));
                // NOTE(review): commitBulk() runs once per hit, flushing each update
                // individually — confirm whether it was meant to sit outside the loop.
                commitBulk();
            }
        }
    }
}
|
public class Duration { /** * Converts the duration value to milliseconds .
* @ return the duration value in milliseconds ( will be negative if
* { @ link # isPrior } is true ) */
public long toMillis ( ) { } }
|
long totalSeconds = 0 ; if ( weeks != null ) { totalSeconds += 60L * 60 * 24 * 7 * weeks ; } if ( days != null ) { totalSeconds += 60L * 60 * 24 * days ; } if ( hours != null ) { totalSeconds += 60L * 60 * hours ; } if ( minutes != null ) { totalSeconds += 60L * minutes ; } if ( seconds != null ) { totalSeconds += seconds ; } if ( prior ) { totalSeconds *= - 1 ; } return totalSeconds * 1000 ;
|
public class CertUtil { /** * 将签名私钥证书文件读取为证书存储对象
* @ param pfxkeyfile
* 证书文件名
* @ param keypwd
* 证书密码
* @ param type
* 证书类型
* @ return 证书对象
* @ throws IOException */
private static KeyStore getKeyInfo ( String pfxkeyfile , String keypwd , String type ) throws IOException { } }
|
LogUtil . writeLog ( "加载签名证书==>" + pfxkeyfile ) ; FileInputStream fis = null ; try { KeyStore ks = KeyStore . getInstance ( type , "BC" ) ; LogUtil . writeLog ( "Load RSA CertPath=[" + pfxkeyfile + "],Pwd=[" + keypwd + "],type=[" + type + "]" ) ; fis = new FileInputStream ( pfxkeyfile ) ; char [ ] nPassword = null ; nPassword = null == keypwd || "" . equals ( keypwd . trim ( ) ) ? null : keypwd . toCharArray ( ) ; if ( null != ks ) { ks . load ( fis , nPassword ) ; } return ks ; } catch ( Exception e ) { LogUtil . writeErrorLog ( "getKeyInfo Error" , e ) ; return null ; } finally { if ( null != fis ) fis . close ( ) ; }
|
public class ObjectFactory { /** * Create an instance of { @ link JAXBElement } { @ code < } { @ link IdType } { @ code > } */
@ XmlElementDecl ( namespace = "http://www.w3.org/2005/Atom" , name = "id" , scope = FeedType . class ) public JAXBElement < IdType > createFeedTypeId ( IdType value ) { } }
|
return new JAXBElement < IdType > ( ENTRY_TYPE_ID_QNAME , IdType . class , FeedType . class , value ) ;
|
public class CreateEventSubscriptionRequest { /** * The list of identifiers of the event sources for which events will be returned . If not specified , then all
* sources are included in the response . An identifier must begin with a letter and must contain only ASCII letters ,
* digits , and hyphens ; it cannot end with a hyphen or contain two consecutive hyphens .
* < b > NOTE : < / b > This method appends the values to the existing list ( if any ) . Use
* { @ link # setSourceIds ( java . util . Collection ) } or { @ link # withSourceIds ( java . util . Collection ) } if you want to
* override the existing values .
* @ param sourceIds
* The list of identifiers of the event sources for which events will be returned . If not specified , then all
* sources are included in the response . An identifier must begin with a letter and must contain only ASCII
* letters , digits , and hyphens ; it cannot end with a hyphen or contain two consecutive hyphens .
* @ return Returns a reference to this object so that method calls can be chained together . */
public CreateEventSubscriptionRequest withSourceIds ( String ... sourceIds ) { } }
|
if ( this . sourceIds == null ) { setSourceIds ( new java . util . ArrayList < String > ( sourceIds . length ) ) ; } for ( String ele : sourceIds ) { this . sourceIds . add ( ele ) ; } return this ;
|
public class XeGoogleLink { /** * Ctor .
* @ param req Request
* @ param app Google application ID
* @ param rel Related
* @ param redir Redirect URI
* @ return Source
* @ throws IOException If fails
* @ since 0.14
* @ checkstyle ParameterNumberCheck ( 4 lines ) */
private static XeSource make ( final Request req , final CharSequence app , final CharSequence rel , final CharSequence redir ) throws IOException { } }
|
return new XeLink ( rel , new Href ( "https://accounts.google.com/o/oauth2/auth" ) . with ( "client_id" , app ) . with ( "redirect_uri" , redir ) . with ( "response_type" , "code" ) . with ( "state" , new RqHref . Base ( req ) . href ( ) ) . with ( "scope" , "https://www.googleapis.com/auth/userinfo.profile" ) ) ;
|
public class ThreeViewEstimateMetricScene {
    /**
     * Tries a bunch of stuff to ensure that it can find the best solution which is
     * physically possible: optimizes the current hypothesis, then an alternative
     * flipped-rotation hypothesis, and keeps whichever scores better.
     */
    private void findBestValidSolution(BundleAdjustment<SceneStructureMetric> bundleAdjustment) {
        // prints out useful debugging information that lets you know how well it's converging
        if (verbose != null && verboseLevel > 0)
            bundleAdjustment.setVerbose(verbose, 0);
        // Specifies convergence criteria
        bundleAdjustment.configure(convergeSBA.ftol, convergeSBA.gtol, convergeSBA.maxIterations);
        bundleAdjustment.setParameters(structure, observations);
        bundleAdjustment.optimize(structure);
        // ensure that the points are in front of the camera and are a valid solution
        if (checkBehindCamera(structure)) {
            if (verbose != null)
                verbose.println(" flipping view");
            flipAround(structure, observations);
            bundleAdjustment.setParameters(structure, observations);
            bundleAdjustment.optimize(structure);
        }
        double bestScore = bundleAdjustment.getFitScore();
        // Snapshot the current solution (poses and camera models) so it can be
        // restored if the alternative hypothesis below scores worse.
        List<Se3_F64> bestPose = new ArrayList<>();
        List<BundlePinholeSimplified> bestCameras = new ArrayList<>();
        // NOTE(review): indexes structure.cameras with a views-based bound — assumes
        // views.length == cameras.length; confirm that invariant holds upstream.
        for (int i = 0; i < structure.views.length; i++) {
            BundlePinholeSimplified c = structure.cameras[i].getModel();
            bestPose.add(structure.views[i].worldToView.copy());
            bestCameras.add(c.copy());
        }
        // Reset intrinsics back to the initial pinhole guess before trying the
        // alternative hypothesis.
        for (int i = 0; i < structure.cameras.length; i++) {
            BundlePinholeSimplified c = structure.cameras[i].getModel();
            c.f = listPinhole.get(i).fx;
            c.k1 = c.k2 = 0;
        }
        // flip rotation assuming that it was done wrong
        for (int i = 1; i < structure.views.length; i++) {
            CommonOps_DDRM.transpose(structure.views[i].worldToView.R);
        }
        triangulatePoints(structure, observations);
        bundleAdjustment.setParameters(structure, observations);
        bundleAdjustment.optimize(structure);
        if (checkBehindCamera(structure)) {
            if (verbose != null)
                verbose.println(" flipping view");
            flipAround(structure, observations);
            bundleAdjustment.setParameters(structure, observations);
            bundleAdjustment.optimize(structure);
        }
        // revert to old settings
        if (verbose != null)
            verbose.println(" ORIGINAL / NEW = " + bestScore + " / " + bundleAdjustment.getFitScore());
        if (bundleAdjustment.getFitScore() > bestScore) {
            // The flipped hypothesis is worse: restore the saved solution and
            // re-optimize from it.
            if (verbose != null)
                verbose.println(" recomputing old structure");
            for (int i = 0; i < structure.cameras.length; i++) {
                BundlePinholeSimplified c = structure.cameras[i].getModel();
                c.set(bestCameras.get(i));
                structure.views[i].worldToView.set(bestPose.get(i));
            }
            triangulatePoints(structure, observations);
            bundleAdjustment.setParameters(structure, observations);
            bundleAdjustment.optimize(structure);
            if (verbose != null)
                verbose.println(" score = " + bundleAdjustment.getFitScore());
        }
    }
}
|
public class FieldScopeImpl { private static FieldScope create ( FieldScopeLogic logic , Function < ? super Optional < Descriptor > , String > usingCorrespondenceStringFunction ) { } }
|
return new AutoValue_FieldScopeImpl ( logic , usingCorrespondenceStringFunction ) ;
|
public class ApplicationGatewaysInner {
    /**
     * Stops the specified application gateway in a resource group.
     * @param resourceGroupName The name of the resource group.
     * @param applicationGatewayName The name of the application gateway.
     * @throws IllegalArgumentException thrown if parameters fail the validation
     * @throws CloudException thrown if the request is rejected by server
     * @throws RuntimeException all other wrapped checked exceptions if the request fails to be sent
     */
    public void stop(String resourceGroupName, String applicationGatewayName) {
        // Blocks until the long-running stop operation finishes; last() waits for
        // the final emission of the polling observable.
        stopWithServiceResponseAsync(resourceGroupName, applicationGatewayName).toBlocking().last().body();
    }
}
|
public class AccountUsageMarshaller {
    /**
     * Marshalls the given parameter object into the protocol marshaller.
     * @param accountUsage the object to marshall (must not be null)
     * @param protocolMarshaller the target marshaller
     * @throws SdkClientException if accountUsage is null or marshalling fails
     */
    public void marshall(AccountUsage accountUsage, ProtocolMarshaller protocolMarshaller) {
        if (accountUsage == null) {
            throw new SdkClientException("Invalid argument passed to marshall(...)");
        }
        try {
            protocolMarshaller.marshall(accountUsage.getTotalCodeSize(), TOTALCODESIZE_BINDING);
            protocolMarshaller.marshall(accountUsage.getFunctionCount(), FUNCTIONCOUNT_BINDING);
        } catch (Exception e) {
            // Wrap any failure, preserving the original cause.
            throw new SdkClientException("Unable to marshall request to JSON: " + e.getMessage(), e);
        }
    }
}
|
public class AmazonLexModelBuildingClient { /** * Gets information about all of the versions of a bot .
* The < code > GetBotVersions < / code > operation returns a < code > BotMetadata < / code > object for each version of a bot .
* For example , if a bot has three numbered versions , the < code > GetBotVersions < / code > operation returns four
* < code > BotMetadata < / code > objects in the response , one for each numbered version and one for the
* < code > $ LATEST < / code > version .
* The < code > GetBotVersions < / code > operation always returns at least one version , the < code > $ LATEST < / code > version .
* This operation requires permissions for the < code > lex : GetBotVersions < / code > action .
* @ param getBotVersionsRequest
* @ return Result of the GetBotVersions operation returned by the service .
* @ throws NotFoundException
* The resource specified in the request was not found . Check the resource and try again .
* @ throws LimitExceededException
* The request exceeded a limit . Try your request again .
* @ throws InternalFailureException
* An internal Amazon Lex error occurred . Try your request again .
* @ throws BadRequestException
* The request is not well formed . For example , a value is invalid or a required field is missing . Check the
* field values , and try again .
* @ sample AmazonLexModelBuilding . GetBotVersions
* @ see < a href = " http : / / docs . aws . amazon . com / goto / WebAPI / lex - models - 2017-04-19 / GetBotVersions " target = " _ top " > AWS API
* Documentation < / a > */
@ Override public GetBotVersionsResult getBotVersions ( GetBotVersionsRequest request ) { } }
|
request = beforeClientExecution ( request ) ; return executeGetBotVersions ( request ) ;
|
public class DJXYAreaChartBuilder {
    /**
     * Adds the specified serie column to the dataset with a custom label.
     * @param column the serie column
     * @param label the custom label for the column
     * @return this builder, so calls can be chained
     */
    public DJXYAreaChartBuilder addSerie(AbstractColumn column, String label) {
        getDataset().addSerie(column, label);
        return this;
    }
}
|
public class NpmPackage { /** * get a stream that contains the contents of one of the files in a folder
* @ param folder
* @ param file
* @ return
* @ throws IOException */
public InputStream load ( String folder , String file ) throws IOException { } }
|
if ( content . containsKey ( folder + "/" + file ) ) return new ByteArrayInputStream ( content . get ( folder + "/" + file ) ) ; else { File f = new File ( Utilities . path ( path , folder , file ) ) ; if ( f . exists ( ) ) return new FileInputStream ( f ) ; throw new IOException ( "Unable to find the file " + folder + "/" + file + " in the package " + name ( ) ) ; }
|
public class EnvironmentSettingsInner { /** * Modify properties of environment setting .
* @ param resourceGroupName The name of the resource group .
* @ param labAccountName The name of the lab Account .
* @ param labName The name of the lab .
* @ param environmentSettingName The name of the environment Setting .
* @ param environmentSetting Represents settings of an environment , from which environment instances would be created
* @ throws IllegalArgumentException thrown if parameters fail the validation
* @ throws CloudException thrown if the request is rejected by server
* @ throws RuntimeException all other wrapped checked exceptions if the request fails to be sent
* @ return the EnvironmentSettingInner object if successful . */
public EnvironmentSettingInner update ( String resourceGroupName , String labAccountName , String labName , String environmentSettingName , EnvironmentSettingFragment environmentSetting ) { } }
|
return updateWithServiceResponseAsync ( resourceGroupName , labAccountName , labName , environmentSettingName , environmentSetting ) . toBlocking ( ) . single ( ) . body ( ) ;
|
public class AuditCollectorUtil { /** * Get all audit results */
@ SuppressWarnings ( "PMD" ) public static Map < AuditType , Audit > getAudit ( Dashboard dashboard , AuditSettings settings , long begin , long end ) { } }
|
Map < AuditType , Audit > audits = new HashMap < > ( ) ; String url = getAuditAPIUrl ( dashboard , settings , begin , end ) ; JSONObject auditResponseObj = parseObject ( url , settings ) ; if ( auditResponseObj == null ) { return audits ; } JSONArray globalStatus = ( JSONArray ) auditResponseObj . get ( STR_AUDITSTATUSES ) ; JSONObject review = ( JSONObject ) auditResponseObj . get ( STR_REVIEW ) ; JSONArray codeReviewJO = review . get ( AuditType . CODE_REVIEW . name ( ) ) == null ? null : ( JSONArray ) review . get ( AuditType . CODE_REVIEW . name ( ) ) ; Audit audit = getCodeReviewAudit ( codeReviewJO , globalStatus ) ; audits . put ( audit . getType ( ) , audit ) ; JSONArray scaJO = review . get ( AuditType . CODE_QUALITY . name ( ) ) == null ? null : ( JSONArray ) review . get ( AuditType . CODE_QUALITY . name ( ) ) ; audit = getCodeQualityAudit ( scaJO , globalStatus ) ; audits . put ( audit . getType ( ) , audit ) ; JSONArray perfJO = review . get ( AuditType . PERF_TEST . name ( ) ) == null ? null : ( JSONArray ) review . get ( AuditType . PERF_TEST . name ( ) ) ; audit = getPerfAudit ( perfJO , globalStatus ) ; audits . put ( audit . getType ( ) , audit ) ; JSONArray ossJO = review . get ( AuditType . LIBRARY_POLICY . name ( ) ) == null ? null : ( JSONArray ) review . get ( AuditType . LIBRARY_POLICY . name ( ) ) ; audit = getOSSAudit ( ossJO , globalStatus ) ; audits . put ( audit . getType ( ) , audit ) ; JSONArray testJO = review . get ( AuditType . TEST_RESULT . name ( ) ) == null ? null : ( JSONArray ) review . get ( AuditType . TEST_RESULT . name ( ) ) ; audit = getTestAudit ( testJO , globalStatus ) ; audits . put ( audit . getType ( ) , audit ) ; JSONArray sscaJO = review . get ( AuditType . STATIC_SECURITY_ANALYSIS . name ( ) ) == null ? null : ( JSONArray ) review . get ( AuditType . STATIC_SECURITY_ANALYSIS . name ( ) ) ; audit = getSecurityAudit ( sscaJO , globalStatus ) ; audits . put ( audit . getType ( ) , audit ) ; return audits ;
|
public class ExtendedPseudoRandomGenerator { /** * Use the polar form of the Box - Muller transformation to obtain
* a pseudo random number from a Gaussian distribution
* Code taken from Maurice Clerc ' s implementation
* @ param mean
* @ param standardDeviation
* @ return A pseudo random number */
public double randNormal ( double mean , double standardDeviation ) { } }
|
double x1 , x2 , w , y1 ; do { x1 = 2.0 * randomGenerator . nextDouble ( ) - 1.0 ; x2 = 2.0 * randomGenerator . nextDouble ( ) - 1.0 ; w = x1 * x1 + x2 * x2 ; } while ( w >= 1.0 ) ; w = Math . sqrt ( ( - 2.0 * Math . log ( w ) ) / w ) ; y1 = x1 * w ; y1 = y1 * standardDeviation + mean ; return y1 ;
|
public class RadixSort {
    /**
     * Specialization of getCounts() for key-prefix arrays. We could probably combine this with
     * getCounts with some added parameters but that seems to hurt in benchmarks.
     */
    private static long[][] getKeyPrefixArrayCounts(LongArray array, long startIndex, long numRecords, int startByteIndex, int endByteIndex) {
        // One 256-slot histogram per byte position; a slot stays null when that byte
        // never varies across the data, so no radix pass is needed for it.
        long[][] counts = new long[8][];
        long bitwiseMax = 0;
        long bitwiseMin = -1L;
        // Records are 16 bytes each — presumably (pointer, key-prefix) pairs with the
        // prefix long at offset +8; confirm against the caller's record layout.
        long baseOffset = array.getBaseOffset() + startIndex * 8L;
        long limit = baseOffset + numRecords * 16L;
        Object baseObject = array.getBaseObject();
        // First pass: OR/AND all prefixes to discover which bit positions ever change.
        for (long offset = baseOffset; offset < limit; offset += 16) {
            long value = Platform.getLong(baseObject, offset + 8);
            bitwiseMax |= value;
            bitwiseMin &= value;
        }
        long bitsChanged = bitwiseMin ^ bitwiseMax;
        // Second pass: build histograms only for byte positions that actually vary.
        for (int i = startByteIndex; i <= endByteIndex; i++) {
            if (((bitsChanged >>> (i * 8)) & 0xff) != 0) {
                counts[i] = new long[256];
                for (long offset = baseOffset; offset < limit; offset += 16) {
                    counts[i][(int) ((Platform.getLong(baseObject, offset + 8) >>> (i * 8)) & 0xff)]++;
                }
            }
        }
        return counts;
    }
}
|
public class BaasDocument { /** * Asynchronously deletes the document with { @ code id } from { @ code collection }
* @ param collection the collection of the document
* @ param id the id of the document
* @ param flags { @ link RequestOptions }
* @ param handler a callback to be invoked with the result of the request
* @ return a { @ link com . baasbox . android . RequestToken } to handle the asynchronous request */
public static RequestToken delete ( String collection , String id , int flags , BaasHandler < Void > handler ) { } }
|
BaasBox box = BaasBox . getDefaultChecked ( ) ; if ( collection == null ) throw new IllegalArgumentException ( "collection cannot be null" ) ; if ( id == null ) throw new IllegalArgumentException ( "id cannot be null" ) ; Delete delete = new Delete ( box , collection , id , flags , handler ) ; return box . submitAsync ( delete ) ;
|
public class ServerSocketService {
    /**
     * Returns a {@link SettableFuture} from the map of connections.
     * <p>This method has the following properties:
     * <ul>
     * <li>If the id is present in {@link #connectionState}, this will throw an
     * {@link IllegalStateException}.
     * <li>The id and source are recorded in {@link #connectionState}.
     * <li>If the future is already in {@link #halfFinishedConnections}, it is removed and
     * returned.
     * <li>If the future is not in {@link #halfFinishedConnections}, a new {@link SettableFuture}
     * is added and then returned.
     * </ul>
     * <p>These features together ensure that each connection can only be accepted once, only
     * requested once, and once both have happened it will be removed from
     * {@link #halfFinishedConnections}.
     */
    private SettableFuture<OpenedSocket> getConnectionImpl(UUID id, Source source) {
        lock.lock();
        try {
            // Record that this (source, id) pairing happened; put returning false
            // means the same side already arrived once, which is a usage error.
            checkState(connectionState.put(source, id), "Connection for %s has already been %s", id, source);
            SettableFuture<OpenedSocket> future = halfFinishedConnections.get(id);
            if (future == null) {
                // First side to arrive: park a future for the other side to find.
                future = SettableFuture.create();
                halfFinishedConnections.put(id, future);
            } else {
                // Second side: both halves have now rendezvoused, so stop tracking it.
                halfFinishedConnections.remove(id);
            }
            return future;
        } finally {
            lock.unlock();
        }
    }
}
|
public class Post4 {
    /**
     * #inject
     * Defines the entity's initial behavior, restoring from a snapshot when one
     * is available.
     */
    @Override
    public Behavior initialBehavior(Optional<BlogState> snapshotState) {
        if (snapshotState.isPresent() && !snapshotState.get().isEmpty()) {
            // behavior after snapshot must be restored by initialBehavior
            return becomePostAdded(snapshotState.get());
        } else {
            // Behavior consists of a State and defined event handlers and command handlers.
            BehaviorBuilder b = newBehaviorBuilder(BlogState.EMPTY);
            // Command handlers are invoked for incoming messages (commands).
            // A command handler must "return" the events to be persisted (if any).
            b.setCommandHandler(AddPost.class, (AddPost cmd, CommandContext<AddPostDone> ctx) -> {
                // Reject posts without a title before persisting anything.
                if (cmd.getContent().getTitle() == null || cmd.getContent().getTitle().equals("")) {
                    ctx.invalidCommand("Title must be defined");
                    return ctx.done();
                }
                final PostAdded postAdded = new PostAdded(entityId(), cmd.getContent());
                return ctx.thenPersist(postAdded, (PostAdded evt) ->
                    // After persist is done additional side effects can be performed
                    ctx.reply(new AddPostDone(entityId())));
            });
            // Event handlers are used both when persisting new events and when
            // replaying events; this one switches behavior once a post exists.
            b.setEventHandlerChangingBehavior(PostAdded.class,
                evt -> becomePostAdded(new BlogState(Optional.of(evt.getContent()), false)));
            return b.build();
        }
    }
}
|
public class SoapFault { /** * Sets the locale used in SOAP fault .
* @ param locale */
public SoapFault locale ( String locale ) { } }
|
LocaleEditor localeEditor = new LocaleEditor ( ) ; localeEditor . setAsText ( locale ) ; this . locale = ( Locale ) localeEditor . getValue ( ) ; return this ;
|
public class MergePolicyValidator { /** * Checks if a { @ link SplitBrainMergeTypeProvider } provides all required types of a given merge policy instance .
* @ param mergeTypeProvider the { @ link SplitBrainMergeTypeProvider } to retrieve the provided merge types
* @ param mergePolicyInstance the merge policy instance
* @ return a list of the required merge types if the merge policy is a { @ link SplitBrainMergePolicy } , { @ code null } otherwise */
private static List < Class > checkMergePolicy ( SplitBrainMergeTypeProvider mergeTypeProvider , Object mergePolicyInstance ) { } }
|
if ( mergePolicyInstance instanceof SplitBrainMergePolicy ) { return checkSplitBrainMergePolicy ( mergeTypeProvider , ( SplitBrainMergePolicy ) mergePolicyInstance ) ; } return null ;
|
public class MetadataHook {
    /**
     * Builds a new {@link Token} from a partition key, according to the partitioner
     * reported by the Cassandra nodes.
     * @param metadata the original driver's metadata.
     * @param routingKey the routing key of the bound partition key
     * @return the token.
     * @throws IllegalStateException if the token factory was not initialized. This would typically
     *     happen if metadata was explicitly disabled with {@link QueryOptions#setMetadataEnabled(boolean)}
     *     before startup.
     */
    public static Token newToken(Metadata metadata, ByteBuffer routingKey) {
        return metadata.tokenFactory().hash(routingKey);
    }
}
|
public class SmbSessionImpl { /** * Establish a tree connection with the configured logon share
* @ throws SmbException */
@ Override public void treeConnectLogon ( ) throws SmbException { } }
|
String logonShare = getContext ( ) . getConfig ( ) . getLogonShare ( ) ; if ( logonShare == null || logonShare . isEmpty ( ) ) { throw new SmbException ( "Logon share is not defined" ) ; } try ( SmbTreeImpl t = getSmbTree ( logonShare , null ) ) { t . treeConnect ( null , null ) ; } catch ( CIFSException e ) { throw SmbException . wrap ( e ) ; }
|
public class CSSStyleSheetImpl {
    /**
     * Deletes the rule at the given position.
     * @param index the position
     * @throws DOMException in case of error
     */
    public void deleteRule(final int index) throws DOMException {
        try {
            getCssRules().delete(index);
        } catch (final IndexOutOfBoundsException e) {
            // Translate the Java exception into the DOM-mandated INDEX_SIZE_ERR.
            throw new DOMExceptionImpl(DOMException.INDEX_SIZE_ERR, DOMExceptionImpl.INDEX_OUT_OF_BOUNDS, e.getMessage());
        }
    }
}
|
public class ProbabilitySampler {
    /**
     * Returns a new {@link ProbabilitySampler}. The probability of sampling a trace is
     * equal to that of the specified probability.
     * @param probability The desired probability of sampling. Must be within [0.0, 1.0].
     * @return a new {@link ProbabilitySampler}.
     * @throws IllegalArgumentException if {@code probability} is out of range
     */
    static ProbabilitySampler create(double probability) {
        Utils.checkArgument(probability >= 0.0 && probability <= 1.0, "probability must be in range [0.0, 1.0]");
        long idUpperBound;
        // Special case the limits, to avoid any possible issues with lack of precision across
        // double/long boundaries. For probability == 0.0, we use Long.MIN_VALUE as this guarantees
        // that we will never sample a trace, even in the case where the id == Long.MIN_VALUE, since
        // Math.abs(Long.MIN_VALUE) == Long.MIN_VALUE.
        if (probability == 0.0) {
            idUpperBound = Long.MIN_VALUE;
        } else if (probability == 1.0) {
            idUpperBound = Long.MAX_VALUE;
        } else {
            // General case: scale the probability onto the positive long range.
            idUpperBound = (long) (probability * Long.MAX_VALUE);
        }
        return new AutoValue_ProbabilitySampler(probability, idUpperBound);
    }
}
|
public class EnvironmentCheck {
    /**
     * Report version info from SAX interfaces.
     * Currently distinguishes between SAX 2, SAX 2.0beta2, SAX1, and not found.
     * @param h Hashtable to put information in
     */
    protected void checkSAXVersion(Hashtable h) {
        if (null == h)
            h = new Hashtable();
        final String SAX_VERSION1_CLASS = "org.xml.sax.Parser";
        final String SAX_VERSION1_METHOD = "parse"; // String
        final String SAX_VERSION2_CLASS = "org.xml.sax.XMLReader";
        final String SAX_VERSION2_METHOD = "parse"; // String
        final String SAX_VERSION2BETA_CLASSNF = "org.xml.sax.helpers.AttributesImpl";
        final String SAX_VERSION2BETA_METHODNF = "setAttributes"; // Attributes
        final Class oneStringArg[] = { java.lang.String.class };
        // Note this introduces a minor compile dependency on SAX...
        final Class attributesArg[] = { org.xml.sax.Attributes.class };
        try {
            // This method was only added in the final SAX 2.0 release;
            // see changes.html "Changes from SAX 2.0beta2 to SAX 2.0prerelease"
            Class clazz = ObjectFactory.findProviderClass(SAX_VERSION2BETA_CLASSNF, ObjectFactory.findClassLoader(), true);
            Method method = clazz.getMethod(SAX_VERSION2BETA_METHODNF, attributesArg);
            // If we succeeded, we have loaded interfaces from a
            // real, final SAX version 2.0 somewhere
            h.put(VERSION + "SAX", "2.0");
        } catch (Exception e) {
            // If we didn't find the SAX 2.0 class, look for a 2.0beta2
            h.put(ERROR + VERSION + "SAX", "ERROR attempting to load SAX version 2 class: " + e.toString());
            h.put(ERROR, ERROR_FOUND);
            try {
                Class clazz = ObjectFactory.findProviderClass(SAX_VERSION2_CLASS, ObjectFactory.findClassLoader(), true);
                Method method = clazz.getMethod(SAX_VERSION2_METHOD, oneStringArg);
                // If we succeeded, we have loaded interfaces from a
                // SAX version 2.0beta2 or earlier; these might work but
                // you should really have the final SAX 2.0
                h.put(VERSION + "SAX-backlevel", "2.0beta2-or-earlier");
            } catch (Exception e2) {
                // If we didn't find the SAX 2.0beta2 class, look for a 1.0 one
                // NOTE(review): this reports e (the SAX 2.0 failure) rather than e2 —
                // looks like a copy-paste slip that should use e2.toString(); confirm
                // against upstream intent before changing.
                h.put(ERROR + VERSION + "SAX", "ERROR attempting to load SAX version 2 class: " + e.toString());
                h.put(ERROR, ERROR_FOUND);
                try {
                    Class clazz = ObjectFactory.findProviderClass(SAX_VERSION1_CLASS, ObjectFactory.findClassLoader(), true);
                    Method method = clazz.getMethod(SAX_VERSION1_METHOD, oneStringArg);
                    // If we succeeded, we have loaded interfaces from a
                    // SAX version 1.0 somewhere; which won't work very
                    // well for JAXP 1.1 or beyond!
                    h.put(VERSION + "SAX-backlevel", "1.0");
                } catch (Exception e3) {
                    // If we didn't find the SAX 2.0 class, look for a 1.0 one
                    // Note that either 1.0 or no SAX are both errors
                    h.put(ERROR + VERSION + "SAX-backlevel", "ERROR attempting to load SAX version 1 class: " + e3.toString());
                }
            }
        }
    }
}
|
public class RangeUtils { /** * Returns the token ranges that will be mapped to Spark partitions .
* @ param config the Deep configuration object .
* @ return the list of computed token ranges . */
public static List < DeepTokenRange > getSplits ( CassandraDeepJobConfig config ) { } }
|
Map < String , Iterable < Comparable > > tokens = new HashMap < > ( ) ; IPartitioner p = getPartitioner ( config ) ; Pair < Session , String > sessionWithHost = CassandraClientProvider . getSession ( config . getHost ( ) , config , false ) ; String queryLocal = "select tokens from system.local" ; tokens . putAll ( fetchTokens ( queryLocal , sessionWithHost , p ) ) ; String queryPeers = "select peer, tokens from system.peers" ; tokens . putAll ( fetchTokens ( queryPeers , sessionWithHost , p ) ) ; List < DeepTokenRange > merged = mergeTokenRanges ( tokens , sessionWithHost . left , p ) ; return splitRanges ( merged , p , config . getBisectFactor ( ) ) ;
|
public class ManagementModule { /** * { @ inheritDoc } */
@ Override public boolean addRelationship ( Context context , String pid , String relationship , String object , boolean isLiteral , String datatype ) throws ServerException { } }
|
return mgmt . addRelationship ( context , pid , relationship , object , isLiteral , datatype ) ;
|
public class CommerceWishListItemLocalServiceWrapper { /** * Adds the commerce wish list item to the database . Also notifies the appropriate model listeners .
* @ param commerceWishListItem the commerce wish list item
* @ return the commerce wish list item that was added */
@ Override public com . liferay . commerce . wish . list . model . CommerceWishListItem addCommerceWishListItem ( com . liferay . commerce . wish . list . model . CommerceWishListItem commerceWishListItem ) { } }
|
return _commerceWishListItemLocalService . addCommerceWishListItem ( commerceWishListItem ) ;
|
public class CmsContentEditorHandler {
    /** Cancels opening the editor.<p> */
    void cancelEdit() {
        // Restore the container-page UI: re-enable the toolbar, reactivate element
        // selection, and leave content-editing mode.
        m_handler.enableToolbarButtons();
        m_handler.activateSelection();
        m_handler.m_controller.setContentEditing(false);
        m_handler.m_controller.reInitInlineEditing();
        // Clear all per-edit bookkeeping so a stale edit cannot leak into the next one.
        m_replaceElement = null;
        m_dependingElementId = null;
        m_currentElementId = null;
        m_editorOpened = false;
    }
}
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.