signature stringlengths 43 39.1k | implementation stringlengths 0 450k |
|---|---|
public class AddOperations { /** * In query protocol , the wrapped result is the real return type for the given operation . In the c2j model ,
* if the output shape has only one member , and the member shape is wrapped ( wrapper is true ) , then the
* return type is the wrapped member shape instead of the output shape . In the following example , the service API is :
* public Foo operation ( OperationRequest operationRequest ) ;
* And the wire log is :
* < OperationResponse >
* < OperationResult >
* < Foo >
* < / Foo >
* < / OperationResult >
* < OperationMetadata >
* < / OperationMetadata >
* < / OperationResponse >
* The C2j model is :
* " Operation " : {
* " input " : { " shape " : " OperationRequest " } ,
* " output " : {
* " shape " : " OperationResult " ,
* " resultWrapper " : " OperationResult "
* " OperationResult " : {
* " members " : {
* " Foo " : { " shape " : " Foo " }
* " Foo " : {
* " wrapper " : true
* Return the wrapped shape name from the given operation if it conforms to the condition
* described above , otherwise , simply return the direct output shape name . */
private static String getResultShapeName ( Operation operation , Map < String , Shape > shapes ) { } } | Output output = operation . getOutput ( ) ; if ( output == null ) return null ; Shape outputShape = shapes . get ( output . getShape ( ) ) ; if ( outputShape . getMembers ( ) . keySet ( ) . size ( ) != 1 ) return output . getShape ( ) ; Member wrappedMember = outputShape . getMembers ( ) . values ( ) . toArray ( new Member [ 0 ] ) [ 0 ] ; Shape wrappedResult = shapes . get ( wrappedMember . getShape ( ) ) ; return wrappedResult != null && wrappedResult . isWrapper ( ) ? wrappedMember . getShape ( ) : output . getShape ( ) ; |
public class Immutables {
    /**
     * Wraps a map with an immutable map view. There is no copying involved; the wrapper
     * delegates reads to the original map, so later changes to {@code map} remain visible.
     *
     * @param map the map to wrap
     * @return an immutable map wrapper that delegates to the original map
     */
    public static <K, V> Map<K, V> immutableMapWrap(Map<? extends K, ? extends V> map) {
        return new ImmutableMapWrapper<>(map);
    }
}
public class BuildReference { /** * Gets the build if still in memory .
* @ return the actual build , or null if it has been collected
* @ see Holder # get */
public @ CheckForNull R get ( ) { } } | Holder < R > h = holder ; // capture
return h != null ? h . get ( ) : null ; |
public class BitsUtil { /** * Fill a vector initialized with " bits " ones .
* @ param v Vector to fill .
* @ param bits Size */
public static void onesI ( long [ ] v , int bits ) { } } | final int fillWords = bits >>> LONG_LOG2_SIZE ; final int fillBits = bits & LONG_LOG2_MASK ; Arrays . fill ( v , 0 , fillWords , LONG_ALL_BITS ) ; if ( fillBits > 0 ) { v [ fillWords ] = ( 1L << fillBits ) - 1 ; } if ( fillWords + 1 < v . length ) { Arrays . fill ( v , fillWords + 1 , v . length , 0L ) ; } |
public class JobGraphGenerator {
    /**
     * This method implements the post-visit during the depth-first traversal. When the post
     * visit happens, all of the descendants have been processed, so this method connects all
     * of the current node's predecessors to the current node.
     *
     * @param node the node currently processed during the post-visit
     * @see org.apache.flink.util.Visitor#postVisit(org.apache.flink.util.Visitable)
     */
    @Override
    public void postVisit(PlanNode node) {
        try {
            // ----- check special cases for which we handle post visit differently -----
            // skip data source nodes (they have no inputs)
            // also, do nothing for union nodes, we connect them later when gathering the inputs for a task
            // solution sets have no input; the initial solution set input is connected when the iteration node is in its postVisit
            if (node instanceof SourcePlanNode || node instanceof NAryUnionPlanNode || node instanceof SolutionSetPlanNode) {
                return;
            }
            // check if we have an iteration. in that case, translate the step function now
            if (node instanceof IterationPlanNode) {
                // prevent nested iterations
                if (node.isOnDynamicPath()) {
                    throw new CompilerException("Nested Iterations are not possible at the moment!");
                }
                // if we recursively go into an iteration (because the constant path of one iteration
                // contains another one), we push the current one onto the stack
                if (this.currentIteration != null) {
                    this.iterationStack.add(this.currentIteration);
                }
                this.currentIteration = (IterationPlanNode) node;
                // recursively translate the step function with this generator
                this.currentIteration.acceptForStepFunction(this);
                // pop the current iteration from the stack
                if (this.iterationStack.isEmpty()) {
                    this.currentIteration = null;
                } else {
                    this.currentIteration = this.iterationStack.remove(this.iterationStack.size() - 1);
                }
                // inputs for the initial bulk partial solution or initial workset are already connected
                // to the iteration head in the head's post visit. connect the initial solution set now.
                if (node instanceof WorksetIterationPlanNode) {
                    // connect the initial solution set as an additional input of the head vertex
                    WorksetIterationPlanNode wsNode = (WorksetIterationPlanNode) node;
                    JobVertex headVertex = this.iterations.get(wsNode).getHeadTask();
                    TaskConfig headConfig = new TaskConfig(headVertex.getConfiguration());
                    int inputIndex = headConfig.getDriverStrategy().getNumInputs();
                    headConfig.setIterationHeadSolutionSetInputIndex(inputIndex);
                    translateChannel(wsNode.getInitialSolutionSetInput(), inputIndex, headVertex, headConfig, false);
                }
                return;
            }
            final JobVertex targetVertex = this.vertices.get(node);
            // ----- Main Path: Translation of channels -----
            // There are two paths of translation: One for chained tasks (or merged tasks in
            // general), which do not have their own task vertex. The other for tasks that have
            // their own vertex, or are the primary task in a vertex (to which the others are chained).
            // check whether this node has its own task, or is merged with another one
            if (targetVertex == null) {
                // node's task is merged with another task. it is either chained, or a merged head
                // vertex from an iteration
                final TaskInChain chainedTask;
                if ((chainedTask = this.chainedTasks.get(node)) != null) {
                    // Chained Task. Sanity check first: a chained task must have exactly one
                    // FORWARD input with no local strategy.
                    final Iterator<Channel> inConns = node.getInputs().iterator();
                    if (!inConns.hasNext()) {
                        throw new CompilerException("Bug: Found chained task with no input.");
                    }
                    final Channel inConn = inConns.next();
                    if (inConns.hasNext()) {
                        throw new CompilerException("Bug: Found a chained task with more than one input!");
                    }
                    if (inConn.getLocalStrategy() != null && inConn.getLocalStrategy() != LocalStrategy.NONE) {
                        throw new CompilerException("Bug: Found a chained task with an input local strategy.");
                    }
                    if (inConn.getShipStrategy() != null && inConn.getShipStrategy() != ShipStrategyType.FORWARD) {
                        throw new CompilerException("Bug: Found a chained task with an input ship strategy other than FORWARD.");
                    }
                    JobVertex container = chainedTask.getContainingVertex();
                    if (container == null) {
                        // resolve the containing vertex through the (single) predecessor
                        final PlanNode sourceNode = inConn.getSource();
                        container = this.vertices.get(sourceNode);
                        if (container == null) {
                            // predecessor is itself chained
                            container = this.chainedTasks.get(sourceNode).getContainingVertex();
                            if (container == null) {
                                throw new IllegalStateException("Bug: Chained task predecessor has not been assigned its containing vertex.");
                            }
                        } else {
                            // predecessor is a proper task job vertex and this is the first chained
                            // task. add a forward connection entry.
                            new TaskConfig(container.getConfiguration()).addOutputShipStrategy(ShipStrategyType.FORWARD);
                        }
                        chainedTask.setContainingVertex(container);
                    }
                    // add info about the input serializer type
                    chainedTask.getTaskConfig().setInputSerializer(inConn.getSerializer(), 0);
                    // update name of container task to reflect the chain
                    String containerTaskName = container.getName();
                    if (containerTaskName.startsWith("CHAIN ")) {
                        container.setName(containerTaskName + " -> " + chainedTask.getTaskName());
                    } else {
                        container.setName("CHAIN " + containerTaskName + " -> " + chainedTask.getTaskName());
                    }
                    // update resources of container task: merge in this node's resources
                    container.setResources(container.getMinResources().merge(node.getMinResources()),
                            container.getPreferredResources().merge(node.getPreferredResources()));
                    this.chainedTasksInSequence.add(chainedTask);
                    return;
                } else if (node instanceof BulkPartialSolutionPlanNode || node instanceof WorksetPlanNode) {
                    // merged iteration head task. the task that the head is merged with will take care of it
                    return;
                } else {
                    throw new CompilerException("Bug: Unrecognized merged task vertex.");
                }
            }
            // ----- Here, we translate non-chained tasks -----
            if (this.currentIteration != null) {
                JobVertex head = this.iterations.get(this.currentIteration).getHeadTask();
                // Exclude static code paths from the co-location constraint, because otherwise
                // their execution determines the deployment slots of the co-location group
                if (node.isOnDynamicPath()) {
                    targetVertex.setStrictlyCoLocatedWith(head);
                }
            }
            // create the config that will contain all the descriptions of the inputs
            final TaskConfig targetVertexConfig = new TaskConfig(targetVertex.getConfiguration());
            // get the inputs. if this node is the head of an iteration, we obtain the inputs from the
            // enclosing iteration node, because the inputs are the initial inputs to the iteration.
            final Iterator<Channel> inConns;
            if (node instanceof BulkPartialSolutionPlanNode) {
                inConns = ((BulkPartialSolutionPlanNode) node).getContainingIterationNode().getInputs().iterator();
                // because the partial solution has its own vertex, it has only one (logical) input.
                // note this in the task configuration
                targetVertexConfig.setIterationHeadPartialSolutionOrWorksetInputIndex(0);
            } else if (node instanceof WorksetPlanNode) {
                WorksetPlanNode wspn = (WorksetPlanNode) node;
                // input that is the initial workset
                inConns = Collections.singleton(wspn.getContainingIterationNode().getInput2()).iterator();
                // because we have a stand-alone (non-merged) workset iteration head, the initial
                // workset will be input 0 and the solution set will be input 1
                targetVertexConfig.setIterationHeadPartialSolutionOrWorksetInputIndex(0);
                targetVertexConfig.setIterationHeadSolutionSetInputIndex(1);
            } else {
                inConns = node.getInputs().iterator();
            }
            if (!inConns.hasNext()) {
                throw new CompilerException("Bug: Found a non-source task with no input.");
            }
            // translate each input channel; translateChannel returns how many physical inputs it consumed
            int inputIndex = 0;
            while (inConns.hasNext()) {
                Channel input = inConns.next();
                inputIndex += translateChannel(input, inputIndex, targetVertex, targetVertexConfig, false);
            }
            // broadcast variables
            int broadcastInputIndex = 0;
            for (NamedChannel broadcastInput : node.getBroadcastInputs()) {
                int broadcastInputIndexDelta = translateChannel(broadcastInput, broadcastInputIndex, targetVertex, targetVertexConfig, true);
                targetVertexConfig.setBroadcastInputName(broadcastInput.getName(), broadcastInputIndex);
                targetVertexConfig.setBroadcastInputSerializer(broadcastInput.getSerializer(), broadcastInputIndex);
                broadcastInputIndex += broadcastInputIndexDelta;
            }
        } catch (Exception e) {
            throw new CompilerException("An error occurred while translating the optimized plan to a JobGraph: " + e.getMessage(), e);
        }
    }
}
public class TridentTopology {
    /**
     * Creates a new stream from a plain (non-batching) rich spout by wrapping it in a
     * batch executor and delegating to the batch-spout overload of {@code newStream}.
     *
     * @param txId  the transaction/stream identifier
     * @param spout the rich spout to adapt
     * @return the newly created stream
     */
    public Stream newStream(String txId, IRichSpout spout) {
        return newStream(txId, new RichSpoutBatchExecutor(spout));
    }
}
public class Messages { /** * Delete all messages .
* Permanently deletes all messages held by the specified server . This operation cannot be undone . Also deletes any attachments related to each message .
* @ param server The identifier of the server to be emptied .
* @ throws MailosaurException thrown if the request is rejected by server */
public void deleteAll ( String server ) throws MailosaurException { } } | HashMap < String , String > query = new HashMap < String , String > ( ) ; query . put ( "server" , server ) ; client . request ( "DELETE" , "api/messages" , query ) ; |
public class RepositoryResourceImpl { /** * { @ inheritDoc } */
@ Override public void updateGeneratedFields ( boolean performEditionChecking ) throws RepositoryResourceCreationException { } } | // update the asset filter info .
updateAssetFilterInfo ( performEditionChecking ) ; if ( ( _asset != null ) && ( _asset . getWlpInformation ( ) ) != null && ( _asset . getWlpInformation ( ) . getVanityRelativeURL ( ) ) == null ) { createVanityURL ( ) ; } |
public class SearchIndexContextListener { /** * Rebuild the search index during context initialization .
* @ param servletContextEvent The context event ( not really used ) . */
@ Override public void contextInitialized ( final ServletContextEvent servletContextEvent ) { } } | logger . info ( "Rebuilding Search Index..." ) ; // Build the session factory .
SessionFactory factory = createSessionFactory ( ) ; // Build the hibernate session .
Session session = factory . openSession ( ) ; // Create the fulltext session .
FullTextSession fullTextSession = Search . getFullTextSession ( session ) ; try { fullTextSession . createIndexer ( ) . startAndWait ( ) ; } catch ( InterruptedException e ) { logger . warn ( "Search reindex interrupted. Good luck!" ) ; logger . trace ( "Error:" , e ) ; } finally { // Close everything and release the lock file .
session . close ( ) ; factory . close ( ) ; } |
public class DateUtils { /** * Parse format { @ link # DATETIME _ FORMAT } . This method never throws exception .
* @ param s any string
* @ return the datetime , { @ code null } if parsing error or if parameter is { @ code null }
* @ since 6.6 */
@ CheckForNull public static OffsetDateTime parseOffsetDateTimeQuietly ( @ Nullable String s ) { } } | OffsetDateTime datetime = null ; if ( s != null ) { try { datetime = parseOffsetDateTime ( s ) ; } catch ( RuntimeException e ) { // ignore
} } return datetime ; |
public class GSONHandle { /** * Creates a factory to create a GSONHandle instance for a JsonElement node .
* @ returnthe factory */
static public ContentHandleFactory newFactory ( ) { } } | return new ContentHandleFactory ( ) { @ Override public Class < ? > [ ] getHandledClasses ( ) { return new Class < ? > [ ] { JsonElement . class } ; } @ Override public boolean isHandled ( Class < ? > type ) { return JsonElement . class . isAssignableFrom ( type ) ; } @ Override public < C > ContentHandle < C > newHandle ( Class < C > type ) { @ SuppressWarnings ( "unchecked" ) ContentHandle < C > handle = isHandled ( type ) ? ( ContentHandle < C > ) new GSONHandle ( ) : null ; return handle ; } } ; |
public class NetworkUtils {
    /**
     * Constructs a new {@link SocketAddress} bound to the given host and port. When
     * {@code host} is {@code null}, the address is bound to the wildcard address on the
     * given port instead.
     *
     * @param host name of the host to which the {@link SocketAddress} will be bound; may be {@code null}
     * @param port port number to which the {@link SocketAddress} will be bound
     * @return a new {@link SocketAddress} for the given host and port
     * @throws IllegalArgumentException if the port parameter is outside the range of valid port values
     * @see java.net.InetSocketAddress
     * @see java.net.SocketAddress
     */
    public static SocketAddress newSocketAddress(String host, int port) {
        if (host == null) {
            // No host given: bind to the wildcard address on the given port.
            return new InetSocketAddress(port);
        }
        return new InetSocketAddress(host, port);
    }
}
public class JmsManagedConnectionFactoryImpl { /** * Gets the target
* @ return The target
* @ see com . ibm . websphere . sib . api . jms . JmsConnectionFactory # getTarget */
@ Override public String getTarget ( ) { } } | if ( TraceComponent . isAnyTracingEnabled ( ) && tc . isEntryEnabled ( ) ) SibTr . entry ( this , tc , "getTarget" ) ; String remoteTargetGroup = jcaConnectionFactory . getTarget ( ) ; if ( TraceComponent . isAnyTracingEnabled ( ) && tc . isEntryEnabled ( ) ) SibTr . exit ( this , tc , "getTarget" , remoteTargetGroup ) ; return remoteTargetGroup ; |
public class ConversionContext {
    /**
     * Creates a new ConversionContext for the given {@link ProviderContext} and the given
     * {@link RateType}.
     * <i>Note:</i> for adding additional attributes use
     * {@link javax.money.convert.ConversionContextBuilder (ProviderContext, RateType)}.
     *
     * @param providerContext the provider context, not null
     * @param rateType        the rate type, not null
     * @return a corresponding instance of ConversionContext
     */
    public static ConversionContext from(ProviderContext providerContext, RateType rateType) {
        return ConversionContextBuilder.create(providerContext, rateType).build();
    }
}
public class UpdatePreferencesServlet { /** * Move an element to another location on the tab .
* < p > Used by Respondr UI when moving portlets around in content area . Will be made more generic
* to support ngPortal UI which supports arbitrary nesting of folders . When that code is merged
* in , the behavior of this method will need to change slightly ( make sure movePortlet behavior
* doesn ' t change though ) . Current behavior :
* < ul >
* < li > If destination is a tab either adds to end of 1st column or if no columns , creates one
* and adds it . AFAIK this was not actually used by the UI .
* < li > If target is a column ( 2 down from root ) , portlet always added to end of column . Used
* by UI to drop portlet into empty column ( UI did insertBefore with elementId = columnId )
* < li > If method = insertBefore does insert before elementId ( always a portlet in 4.2 ) .
* < li > If method = appendAfter does append at end of parent ( elementId ) , result of which is a
* column . Used by UI to add to end of column ( elementId is last portlet in column ) .
* < / ul >
* @ param request
* @ param response
* @ param sourceId id of the element to move
* @ param method insertBefore or appendAfter
* @ param destinationId Id of element . If a tab , sourceID added to end of a folder / column in the
* tab . If a folder , sourceID added to the end of the folder . Otherwise sourceID added
* before elementID .
* @ throws IOException
* @ throws PortalException */
@ RequestMapping ( method = RequestMethod . POST , params = "action=moveElement" ) public ModelAndView moveElement ( HttpServletRequest request , HttpServletResponse response , @ RequestParam ( value = "sourceID" ) String sourceId , @ RequestParam String method , @ RequestParam ( value = "elementID" ) String destinationId ) throws IOException , PortalException { } } | final Locale locale = RequestContextUtils . getLocale ( request ) ; if ( moveElementInternal ( request , sourceId , destinationId , method ) ) { return new ModelAndView ( "jsonView" , Collections . singletonMap ( "response" , getMessage ( "success.move.element" , "Element moved successfully" , locale ) ) ) ; } else { response . setStatus ( HttpServletResponse . SC_FORBIDDEN ) ; return new ModelAndView ( "jsonView" , Collections . singletonMap ( "response" , getMessage ( "error.move.element" , "Error moving element" , locale ) ) ) ; } |
public class Socks4ClientBootstrap { /** * try to look at the remoteAddress and decide to use SOCKS4 or SOCKS4a handshake
* packet . */
private static ChannelFuture socksConnect ( Channel channel , InetSocketAddress remoteAddress ) { } } | channel . write ( createHandshake ( remoteAddress ) ) ; return ( ( Socks4HandshakeHandler ) channel . getPipeline ( ) . get ( "handshake" ) ) . getChannelFuture ( ) ; |
public class CassAccess {
    /**
     * Builds (once) and returns a Cassandra {@code Cluster}. The builder {@code cb} is a
     * static cache: configuration is read only on the first call; subsequent calls just
     * call {@code cb.build()}.
     *
     * To create a DCAwareRoundRobin policy, the following properties are needed:
     * LATITUDE (or AFT_LATITUDE), LONGITUDE (or AFT_LONGITUDE), and CASSANDRA CLUSTERS with
     * additional information: {@code machine:DC:lat:long,machine:DC:lat:long}.
     *
     * @param env    the environment providing properties, logging and decryption
     * @param prefix optional property-name prefix; {@code null} means no prefix
     * @return the built Cluster
     * @throws APIException if latitude/longitude are missing or no password is configured
     * @throws IOException  on I/O failure
     */
    @SuppressWarnings("deprecation")
    public static synchronized Cluster cluster(Env env, String prefix) throws APIException, IOException {
        if (cb == null) {
            String pre;
            if (prefix == null) {
                pre = "";
            } else {
                env.info().log("Cassandra Connection for ", prefix);
                pre = prefix + '.';
            }
            cb = Cluster.builder();
            // Port (default 9042).
            String str = env.getProperty(pre + CASSANDRA_CLUSTERS_PORT, "9042");
            if (str != null) {
                env.init().log("Cass Port = ", str);
                cb.withPort(Integer.parseInt(str));
            }
            // Optional credentials; the password property is stored encrypted.
            str = env.getProperty(pre + CASSANDRA_CLUSTERS_USER_NAME, null);
            if (str != null) {
                env.init().log("Cass User = ", str);
                String epass = env.getProperty(pre + CASSANDRA_CLUSTERS_PASSWORD, null);
                if (epass == null) {
                    throw new APIException("No Password configured for " + str);
                }
                // TODO Figure out way to ensure Decryptor setting in AuthzEnv
                if (env instanceof AuthzEnv) {
                    cb.withCredentials(str, ((AuthzEnv) env).decrypt(epass, true));
                } else {
                    cb.withCredentials(str, env.decryptor().decrypt(epass));
                }
            }
            // Optional list of exception names that trigger a reset.
            str = env.getProperty(pre + CASSANDRA_RESET_EXCEPTIONS, null);
            if (str != null) {
                env.init().log("Cass ResetExceptions = ", str);
                for (String ex : Split.split(',', str)) {
                    resetExceptions.add(new Resettable(env, ex));
                }
            }
            // Service coordinates, required for DC-aware load balancing below.
            str = env.getProperty(LATITUDE, env.getProperty("AFT_LATITUDE", null));
            Double lat = str != null ? Double.parseDouble(str) : null;
            str = env.getProperty(LONGITUDE, env.getProperty("AFT_LONGITUDE", null));
            Double lon = str != null ? Double.parseDouble(str) : null;
            if (lat == null || lon == null) {
                throw new APIException("LATITUDE(or AFT_LATITUDE) and/or LONGITUDE(or AFT_LATITUDE) are not set");
            }
            env.init().printf("Service Latitude,Longitude = %f,%f", lat, lon);
            // Contact points, format machine[:DC[:lat:long]] separated by commas.
            str = env.getProperty(pre + CASSANDRA_CLUSTERS, "localhost");
            env.init().log("Cass Clusters = ", str);
            String[] machs = Split.split(',', str);
            String[] cpoints = new String[machs.length];
            String bestDC = null;
            int numInBestDC = 1;
            double mlat, mlon, temp, distance = -1.0;
            for (int i = 0; i < machs.length; ++i) {
                String[] minfo = Split.split(':', machs[i]);
                if (minfo.length > 0) {
                    cpoints[i] = minfo[0];
                }
                // Calc closest DC with Great Circle.
                // NOTE(review): the '>' comparison keeps the DC with the GREATEST value of
                // GreatCircle.calc — if that function returns a distance, this selects the
                // farthest DC, not the closest; confirm the intended semantics of calc().
                if (minfo.length > 3) {
                    mlat = Double.parseDouble(minfo[2]);
                    mlon = Double.parseDouble(minfo[3]);
                    if ((temp = GreatCircle.calc(lat, lon, mlat, mlon)) > distance) {
                        distance = temp;
                        if (bestDC != null && bestDC.equals(minfo[1])) {
                            ++numInBestDC;
                        } else {
                            bestDC = minfo[1];
                            numInBestDC = 1;
                        }
                    } else {
                        if (bestDC != null && bestDC.equals(minfo[1])) {
                            ++numInBestDC;
                        }
                    }
                }
            }
            cb.addContactPoints(cpoints);
            if (bestDC != null) {
                // 8/26/2016 Management has determined that Accuracy is preferred over speed in bad situations.
                // Local DC Aware Load Balancing appears to have the highest normal performance, with the best
                // Degraded Accuracy.
                cb.withLoadBalancingPolicy(new DCAwareRoundRobinPolicy(bestDC, numInBestDC,
                        true /* allow LocalDC to look at other DCs for LOCAL_QUORUM */));
                env.init().printf("Cassandra configured for DCAwareRoundRobinPolicy at %s with emergency remote of up to %d node(s)", bestDC, numInBestDC);
            } else {
                env.init().printf("Cassandra is using Default Policy, which is not DC aware");
            }
        }
        return cb.build();
    }
}
public class CompactingHashTable {
    /**
     * Creates a prober that matches probe-side records against the records stored in this table,
     * using the given comparators.
     *
     * @param probeSideComparator comparator for the probe-side records
     * @param pairComparator      comparator pairing probe-side and build-side records
     * @return a new prober using the given comparators
     */
    @Override
    public <PT> HashTableProber<PT> getProber(TypeComparator<PT> probeSideComparator, TypePairComparator<PT, T> pairComparator) {
        return new HashTableProber<PT>(probeSideComparator, pairComparator);
    }
}
public class StandardStashReader { /** * Returns a new StashReader that is locked to the same stash time the instance is currently using . Future calls to
* lock or unlock the stash time on this instance will not affect the returned instance . */
public StashReader getLockedView ( ) { } } | return new FixedStashReader ( URI . create ( String . format ( "s3://%s/%s" , _bucket , getRootPath ( ) ) ) , _s3 ) ; |
public class DFSPath { /** * Gets a connection to the DFS
* @ return a connection to the DFS
* @ throws IOException */
DistributedFileSystem getDFS ( ) throws IOException { } } | if ( this . dfs == null ) { FileSystem fs = location . getDFS ( ) ; if ( ! ( fs instanceof DistributedFileSystem ) ) { ErrorMessageDialog . display ( "DFS Browser" , "The DFS Browser cannot browse anything else " + "but a Distributed File System!" ) ; throw new IOException ( "DFS Browser expects a DistributedFileSystem!" ) ; } this . dfs = ( DistributedFileSystem ) fs ; } return this . dfs ; |
public class MonitorScheduler { /** * 取消注册对应的Monitor对象
* @ param monitor */
public static void unRegister ( Monitor monitor ) { } } | ScheduledFuture future = register . remove ( monitor ) ; if ( future != null ) { future . cancel ( true ) ; // 打断
} |
public class BigQuerySnippets {
    /**
     * Example of deleting a dataset (including its contents) by project id and dataset name.
     * [VARIABLE "my_dataset_name"]
     *
     * @return {@code true} if the dataset was deleted, {@code false} if it was not found
     */
    public boolean deleteDatasetFromId(String projectId, String datasetName) {
        // [START bigquery_delete_dataset]
        DatasetId datasetId = DatasetId.of(projectId, datasetName);
        boolean deleted = bigquery.delete(datasetId, DatasetDeleteOption.deleteContents());
        if (deleted) {
            // the dataset was deleted
        } else {
            // the dataset was not found
        }
        // [END bigquery_delete_dataset]
        return deleted;
    }
}
public class HtmlValidationConfiguration { /** * Determines whether the validation result window should pop up for this
* particular error . If just one error instructs that the window should pop
* up , it does so .
* @ param error
* the validation error
* @ return < code > true < / code > when the window should automatically pop up ,
* rendering the markup error in the face of the user */
public boolean mustShowWindowForError ( SAXParseException error ) { } } | for ( Pattern curIgnorePattern : ignoreErrorsForWindow ) { if ( curIgnorePattern . matcher ( error . getMessage ( ) ) . find ( ) ) return false ; } return true ; |
public class BDDCache { /** * Resizes the cache to a new number of entries . The old cache entries are removed in this process .
* @ param ns the new number of entries */
private void resize ( final int ns ) { } } | final int size = BDDPrime . primeGTE ( ns ) ; this . table = new BDDCacheEntry [ size ] ; for ( int n = 0 ; n < size ; n ++ ) this . table [ n ] = new BDDCacheEntry ( ) ; |
public class Searcher {
    /**
     * Gets the refinement values for a faceted attribute.
     *
     * @param attribute the attribute refined on
     * @return the refinements enabled for the given attribute, or {@code null} if there is none
     */
    @SuppressWarnings({"WeakerAccess", "unused", "SameParameterValue"}) // For library users
    @Nullable
    public List<String> getFacetRefinements(@NonNull String attribute) {
        // Direct lookup; no defensive copy is made, so callers share the stored list.
        return refinementMap.get(attribute);
    }
}
public class ImageLoading {
    /**
     * Loads a bitmap, reusing the destination bitmap even when its size differs from the
     * source image. If it is unable to load with the reuse method, it tries to load without
     * it. Reuse works only on Android 4.4+.
     *
     * @param data image file contents
     * @param dest bitmap to reuse
     * @return result of loading
     * @throws ImageLoadException if it is unable to load the file
     */
    public static ReuseResult loadReuse(byte[] data, Bitmap dest) throws ImageLoadException {
        // Wrap the raw bytes in a memory-backed source and delegate.
        return loadBitmapReuse(new MemorySource(data), dest);
    }
}
public class MarkdownParser { /** * Create a validation component for an hyper reference .
* @ param it the hyper reference .
* @ param currentFile the current file .
* @ param context the validation context .
* @ return the validation components . */
protected Iterable < DynamicValidationComponent > createValidatorComponents ( Link it , File currentFile , DynamicValidationContext context ) { } } | final Collection < DynamicValidationComponent > components = new ArrayList < > ( ) ; if ( isLocalFileReferenceValidation ( ) || isRemoteReferenceValidation ( ) ) { final int lineno = computeLineNo ( it ) ; final URL url = FileSystem . convertStringToURL ( it . getUrl ( ) . toString ( ) , true ) ; if ( URISchemeType . HTTP . isURL ( url ) || URISchemeType . HTTPS . isURL ( url ) || URISchemeType . FTP . isURL ( url ) ) { if ( isRemoteReferenceValidation ( ) ) { final Collection < DynamicValidationComponent > newComponents = createRemoteReferenceValidatorComponents ( it , url , lineno , currentFile , context ) ; if ( newComponents != null && ! newComponents . isEmpty ( ) ) { components . addAll ( newComponents ) ; } } } else if ( URISchemeType . FILE . isURL ( url ) ) { if ( isLocalFileReferenceValidation ( ) ) { final Collection < DynamicValidationComponent > newComponents = createLocalFileValidatorComponents ( it , url , lineno , currentFile , context ) ; if ( newComponents != null && ! newComponents . isEmpty ( ) ) { components . addAll ( newComponents ) ; } } } } return components ; |
public class AppServiceCertificateOrdersInner {
    /**
     * Reissue an existing certificate order.
     *
     * @param resourceGroupName Name of the resource group to which the resource belongs.
     * @param certificateOrderName Name of the certificate order.
     * @param reissueCertificateOrderRequest Parameters for the reissue.
     * @param serviceCallback the async ServiceCallback to handle successful and failed responses.
     * @throws IllegalArgumentException thrown if parameters fail the validation
     * @return the {@link ServiceFuture} object
     */
    public ServiceFuture<Void> reissueAsync(String resourceGroupName, String certificateOrderName, ReissueCertificateOrderRequest reissueCertificateOrderRequest, final ServiceCallback<Void> serviceCallback) {
        // Adapt the service-response async call to a ServiceFuture for callback-style callers.
        return ServiceFuture.fromResponse(reissueWithServiceResponseAsync(resourceGroupName, certificateOrderName, reissueCertificateOrderRequest), serviceCallback);
    }
}
public class ListSupertaggedSentence {
    /**
     * Creates a supertagged sentence where the supertags for each word are unobserved. Using
     * this sentence during CCG parsing allows any syntactic category to be assigned to each
     * word.
     *
     * @param words the words of the sentence
     * @param pos   the part-of-speech tag of each word (parallel to {@code words})
     * @return a sentence with an empty candidate-category list and an empty score list per word
     */
    public static ListSupertaggedSentence createWithUnobservedSupertags(List<String> words, List<String> pos) {
        // One empty category list and one empty score list per word.
        return new ListSupertaggedSentence(WordAndPos.createExample(words, pos),
                Collections.nCopies(words.size(), Collections.<HeadedSyntacticCategory>emptyList()),
                Collections.nCopies(words.size(), Collections.<Double>emptyList()));
    }
}
public class HyphenationState { /** * This method applies the { @ link HyphenationPattern pattern } matching at the given { @ code offset } .
* @ param pattern is the matching { @ link HyphenationPattern pattern } .
* @ param pos is the offset in the word to hyphenate . */
private void apply ( HyphenationPattern pattern , int pos ) { } } | int internalOffset = pos - 2 ; HyphenationPatternPosition [ ] positions = pattern . getHyphenationPositions ( ) ; for ( HyphenationPatternPosition hyphenationPosition : positions ) { int i = hyphenationPosition . index + internalOffset ; if ( ( i >= 0 ) && ( i < this . rankings . length ) && ( hyphenationPosition . ranking > this . rankings [ i ] ) ) { this . rankings [ i ] = hyphenationPosition . ranking ; } } |
public class ElmBaseVisitor { /** * Visit a TupleElement . This method will be called for
* every node in the tree that is a TupleElement .
* @ param elm the ELM tree
* @ param context the context passed to the visitor
* @ return the visitor result */
public T visitTupleElement ( TupleElement elm , C context ) { } } | visitElement ( elm . getValue ( ) , context ) ; return null ; |
public class LoginButton { /** * Provides an implementation for { @ link Activity # onActivityResult
* onActivityResult } that updates the Session based on information returned
* during the authorization flow . The Activity containing this view
* should forward the resulting onActivityResult call here to
* update the Session state based on the contents of the resultCode and
* data .
* @ param requestCode
* The requestCode parameter from the forwarded call . When this
* onActivityResult occurs as part of Facebook authorization
* flow , this value is the activityCode passed to open or
* authorize .
* @ param resultCode
* An int containing the resultCode parameter from the forwarded
* call .
* @ param data
* The Intent passed as the data parameter from the forwarded
* call .
* @ return A boolean indicating whether the requestCode matched a pending
* authorization request for this Session .
* @ see Session # onActivityResult ( Activity , int , int , Intent ) */
public boolean onActivityResult ( int requestCode , int resultCode , Intent data ) { } } | Session session = sessionTracker . getSession ( ) ; if ( session != null ) { return session . onActivityResult ( ( Activity ) getContext ( ) , requestCode , resultCode , data ) ; } else { return false ; } |
public class InterfaceService { /** * Hides current view ( if present ) and shows the view managed by the chosen controller
* @ param viewController will be set as the current view and shown . */
public void show ( final ViewController viewController ) { } } | if ( currentController != null ) { if ( isControllerHiding ) { switchToView ( viewController ) ; } else { hideCurrentViewAndSchedule ( viewController ) ; } } else { switchToView ( viewController ) ; } |
public class RuleBasedBreakIterator { /** * checkDictionary This function handles all processing of characters in
* the " dictionary " set . It will determine the appropriate
* course of action , and possibly set up a cache in the
* process . */
private int checkDictionary ( int startPos , int endPos , boolean reverse ) { } } | // Reset the old break cache first .
reset ( ) ; // note : code segment below assumes that dictionary chars are in the
// startPos - endPos range
// value returned should be next character in sequence
if ( ( endPos - startPos ) <= 1 ) { return ( reverse ? startPos : endPos ) ; } // Starting from the starting point , scan towards the proposed result ,
// looking for the first dictionary character ( which may be the one
// we ' re on , if we ' re starting in the middle of a range ) .
fText . setIndex ( reverse ? endPos : startPos ) ; if ( reverse ) { CharacterIteration . previous32 ( fText ) ; } int rangeStart = startPos ; int rangeEnd = endPos ; int category ; int current ; DictionaryBreakEngine . DequeI breaks = new DictionaryBreakEngine . DequeI ( ) ; int foundBreakCount = 0 ; int c = CharacterIteration . current32 ( fText ) ; category = ( short ) fRData . fTrie . getCodePointValue ( c ) ; // Is the character we ' re starting on a dictionary character ? If so , we
// need to back up to include the entire run ; otherwise the results of
// the break algorithm will differ depending on where we start . Since
// the result is cached and there is typically a non - dictionary break
// within a small number of words , there should be little performance impact .
if ( ( category & 0x4000 ) != 0 ) { if ( reverse ) { do { CharacterIteration . next32 ( fText ) ; c = CharacterIteration . current32 ( fText ) ; category = ( short ) fRData . fTrie . getCodePointValue ( c ) ; } while ( c != CharacterIteration . DONE32 && ( ( category & 0x4000 ) ) != 0 ) ; // Back up to the last dictionary character
rangeEnd = fText . getIndex ( ) ; if ( c == CharacterIteration . DONE32 ) { // c = fText - > last32 ( ) ;
// TODO : why was this if needed ?
c = CharacterIteration . previous32 ( fText ) ; } else { c = CharacterIteration . previous32 ( fText ) ; } } else { do { c = CharacterIteration . previous32 ( fText ) ; category = ( short ) fRData . fTrie . getCodePointValue ( c ) ; } while ( c != CharacterIteration . DONE32 && ( ( category & 0x4000 ) != 0 ) ) ; // Back up to the last dictionary character
if ( c == CharacterIteration . DONE32 ) { // c = fText - > first32 ( ) ;
c = CharacterIteration . current32 ( fText ) ; } else { CharacterIteration . next32 ( fText ) ; c = CharacterIteration . current32 ( fText ) ; } rangeStart = fText . getIndex ( ) ; } category = ( short ) fRData . fTrie . getCodePointValue ( c ) ; } // Loop through the text , looking for ranges of dictionary characters .
// For each span , find the appropriate break engine , and ask it to find
// any breaks within the span .
// Note : we always do this in the forward direction , so that the break
// cache is built in the right order .
if ( reverse ) { fText . setIndex ( rangeStart ) ; c = CharacterIteration . current32 ( fText ) ; category = ( short ) fRData . fTrie . getCodePointValue ( c ) ; } LanguageBreakEngine lbe = null ; while ( true ) { while ( ( current = fText . getIndex ( ) ) < rangeEnd && ( category & 0x4000 ) == 0 ) { CharacterIteration . next32 ( fText ) ; c = CharacterIteration . current32 ( fText ) ; category = ( short ) fRData . fTrie . getCodePointValue ( c ) ; } if ( current >= rangeEnd ) { break ; } // We now have a dictionary character . Get the appropriate language object
// to deal with it .
lbe = getLanguageBreakEngine ( c ) ; // Ask the language object if there are any breaks . It will leave the text
// pointer on the other side of its range , ready to search for the next one .
if ( lbe != null ) { int startingIdx = fText . getIndex ( ) ; foundBreakCount += lbe . findBreaks ( fText , rangeStart , rangeEnd , false , fBreakType , breaks ) ; assert fText . getIndex ( ) > startingIdx ; } // Reload the loop variables for the next go - round
c = CharacterIteration . current32 ( fText ) ; category = ( short ) fRData . fTrie . getCodePointValue ( c ) ; } // If we found breaks , build a new break cache . The first and last entries must
// be the original starting and ending position .
if ( foundBreakCount > 0 ) { if ( foundBreakCount != breaks . size ( ) ) { System . out . println ( "oops, foundBreakCount != breaks.size(). LBE = " + lbe . getClass ( ) ) ; } assert foundBreakCount == breaks . size ( ) ; if ( startPos < breaks . peekLast ( ) ) { breaks . offer ( startPos ) ; } if ( endPos > breaks . peek ( ) ) { breaks . push ( endPos ) ; } // TODO : get rid of this array , use results from the deque directly
fCachedBreakPositions = new int [ breaks . size ( ) ] ; int i = 0 ; while ( breaks . size ( ) > 0 ) { fCachedBreakPositions [ i ++ ] = breaks . pollLast ( ) ; } // If there are breaks , then by definition , we are replacing the original
// proposed break by one of the breaks we found . Use following ( ) and
// preceding ( ) to do the work . They should never recurse in this case .
if ( reverse ) { return preceding ( endPos ) ; } else { return following ( startPos ) ; } } // If we get here , there were no language - based breaks . Set the text pointer
// to the original proposed break .
fText . setIndex ( reverse ? startPos : endPos ) ; return ( reverse ? startPos : endPos ) ; |
public class BeanMap { /** * Returns the accessor for the property with the given name .
* @ param name the name of the property
* @ return the accessor method for the property , or null */
public BeanInvoker getReadInvoker ( String name ) { } } | BeanInvoker invoker ; if ( readInvokers . containsKey ( name ) ) { invoker = readInvokers . get ( name ) ; } else { invoker = getInvoker ( readHandleType . get ( name ) , name ) ; readInvokers . put ( name , invoker ) ; } return invoker ; |
public class AnnotationTypeWriterImpl { /** * Add the navigation summary link .
* @ param builder builder for the member to be documented
* @ param label the label for the navigation
* @ param type type to be documented
* @ param liNav the content tree to which the navigation summary link will be added */
protected void addNavSummaryLink ( MemberSummaryBuilder builder , String label , int type , Content liNav ) { } } | AbstractMemberWriter writer = ( ( AbstractMemberWriter ) builder . getMemberSummaryWriter ( type ) ) ; if ( writer == null ) { liNav . addContent ( getResource ( label ) ) ; } else { liNav . addContent ( writer . getNavSummaryLink ( null , ! builder . getVisibleMemberMap ( type ) . noVisibleMembers ( ) ) ) ; } |
public class ClustersInner { /** * Get the IP address , port of all the compute nodes in the Cluster .
* @ param resourceGroupName Name of the resource group to which the resource belongs .
* @ param workspaceName The name of the workspace . Workspace names can only contain a combination of alphanumeric characters along with dash ( - ) and underscore ( _ ) . The name must be from 1 through 64 characters long .
* @ param clusterName The name of the cluster within the specified resource group . Cluster names can only contain a combination of alphanumeric characters along with dash ( - ) and underscore ( _ ) . The name must be from 1 through 64 characters long .
* @ throws IllegalArgumentException thrown if parameters fail the validation
* @ return the PagedList & lt ; RemoteLoginInformationInner & gt ; object wrapped in { @ link ServiceResponse } if successful . */
public Observable < ServiceResponse < Page < RemoteLoginInformationInner > > > listRemoteLoginInformationSinglePageAsync ( final String resourceGroupName , final String workspaceName , final String clusterName ) { } } | if ( resourceGroupName == null ) { throw new IllegalArgumentException ( "Parameter resourceGroupName is required and cannot be null." ) ; } if ( workspaceName == null ) { throw new IllegalArgumentException ( "Parameter workspaceName is required and cannot be null." ) ; } if ( clusterName == null ) { throw new IllegalArgumentException ( "Parameter clusterName is required and cannot be null." ) ; } if ( this . client . subscriptionId ( ) == null ) { throw new IllegalArgumentException ( "Parameter this.client.subscriptionId() is required and cannot be null." ) ; } if ( this . client . apiVersion ( ) == null ) { throw new IllegalArgumentException ( "Parameter this.client.apiVersion() is required and cannot be null." ) ; } return service . listRemoteLoginInformation ( resourceGroupName , workspaceName , clusterName , this . client . subscriptionId ( ) , this . client . apiVersion ( ) , this . client . acceptLanguage ( ) , this . client . userAgent ( ) ) . flatMap ( new Func1 < Response < ResponseBody > , Observable < ServiceResponse < Page < RemoteLoginInformationInner > > > > ( ) { @ Override public Observable < ServiceResponse < Page < RemoteLoginInformationInner > > > call ( Response < ResponseBody > response ) { try { ServiceResponse < PageImpl < RemoteLoginInformationInner > > result = listRemoteLoginInformationDelegate ( response ) ; return Observable . just ( new ServiceResponse < Page < RemoteLoginInformationInner > > ( result . body ( ) , result . response ( ) ) ) ; } catch ( Throwable t ) { return Observable . error ( t ) ; } } } ) ; |
public class HiveOrcSerDeManager { /** * Determine if a file is ORC format .
* Steal ideas & code from presto / OrcReader under Apache License 2.0. */
private static boolean isORC ( Path file , FileSystem fs ) throws IOException { } } | try { FSDataInputStream inputStream = fs . open ( file ) ; long size = fs . getFileStatus ( file ) . getLen ( ) ; byte [ ] buffer = new byte [ Math . toIntExact ( Math . min ( size , EXPECTED_FOOTER_SIZE ) ) ] ; if ( size < buffer . length ) { return false ; } inputStream . readFully ( size - buffer . length , buffer ) ; // get length of PostScript - last byte of the file
int postScriptSize = buffer [ buffer . length - 1 ] & 0xff ; int magicLen = MAGIC_BUFFER . remaining ( ) ; if ( postScriptSize < magicLen + 1 || postScriptSize >= buffer . length ) { return false ; } if ( ! MAGIC_BUFFER . equals ( ByteBuffer . wrap ( buffer , buffer . length - 1 - magicLen , magicLen ) ) ) { // Old versions of ORC ( 0.11 ) wrote the magic to the head of the file
byte [ ] headerMagic = new byte [ magicLen ] ; inputStream . readFully ( 0 , headerMagic ) ; // if it isn ' t there , this isn ' t an ORC file
if ( ! MAGIC_BUFFER . equals ( ByteBuffer . wrap ( headerMagic ) ) ) { return false ; } } return true ; } catch ( Exception e ) { throw new RuntimeException ( "Error occured when checking the type of file:" + file ) ; } |
public class Expression { /** * Returns true if all referenced expressions are { @ linkplain # isCheap ( ) cheap } . */
public static boolean areAllCheap ( Expression first , Expression ... rest ) { } } | return areAllCheap ( ImmutableList . < Expression > builder ( ) . add ( first ) . add ( rest ) . build ( ) ) ; |
public class AlertService { /** * Creates a new trigger .
* @ param alertId The ID of the alert that will own the trigger .
* @ param trigger The trigger to create having an un - populated ID field .
* @ return The trigger having a populated ID field .
* @ throws IOException If the server cannot be reached .
* @ throws TokenExpiredException If the token sent along with the request has expired */
public List < Trigger > createTrigger ( BigInteger alertId , Trigger trigger ) throws IOException , TokenExpiredException { } } | String requestUrl = RESOURCE + "/" + alertId . toString ( ) + "/triggers" ; ArgusResponse response = getClient ( ) . executeHttpRequest ( ArgusHttpClient . RequestType . POST , requestUrl , trigger ) ; assertValidResponse ( response , requestUrl ) ; return fromJson ( response . getResult ( ) , new TypeReference < List < Trigger > > ( ) { } ) ; |
public class DataColumnConstraintsDao { /** * Query by the unique column values
* @ param constraintName
* constraint name
* @ param constraintType
* constraint type
* @ param value
* value
* @ return data column constraints
* @ throws SQLException
* upon failure */
public DataColumnConstraints queryByUnique ( String constraintName , DataColumnConstraintType constraintType , String value ) throws SQLException { } } | DataColumnConstraints constraint = null ; QueryBuilder < DataColumnConstraints , Void > qb = queryBuilder ( ) ; setUniqueWhere ( qb . where ( ) , constraintName , constraintType , value ) ; List < DataColumnConstraints > constraints = qb . query ( ) ; if ( ! constraints . isEmpty ( ) ) { if ( constraints . size ( ) > 1 ) { throw new GeoPackageException ( "More than one " + DataColumnConstraints . class . getSimpleName ( ) + " was found for unique constraint. Name: " + constraintName + ", Type: " + constraintType + ", Value: " + value ) ; } constraint = constraints . get ( 0 ) ; } return constraint ; |
public class HlpEntitiesPage { /** * < p > Make SQL WHERE clause for date - time if need . < / p >
* @ param pSbWhere result clause
* @ param pRequestData - Request Data
* @ param pNameEntity - entity name
* @ param pFldNm - field name
* @ param pParSuffix - parameter suffix
* @ param pFilterMap - map to store current filter
* @ param pFilterAppearance - set to store current filter appearance
* if null - not required
* @ throws Exception - an Exception */
public final void tryMakeWhereDateTime ( final StringBuffer pSbWhere , final IRequestData pRequestData , final String pNameEntity , final String pFldNm , final String pParSuffix , final Map < String , Object > pFilterMap , final Set < String > pFilterAppearance ) throws Exception { } } | String nmRnd = pRequestData . getParameter ( "nmRnd" ) ; String fltOrdPrefix ; if ( nmRnd != null && nmRnd . contains ( "pickerDub" ) ) { fltOrdPrefix = "fltordPD" ; } else if ( nmRnd != null && nmRnd . contains ( "picker" ) ) { fltOrdPrefix = "fltordP" ; } else { fltOrdPrefix = "fltordM" ; } String fltforcedName = fltOrdPrefix + "forcedFor" ; String fltforced = pRequestData . getParameter ( fltforcedName ) ; if ( fltforced != null ) { pFilterMap . put ( fltforcedName , fltforced ) ; } String nmFldVal = fltOrdPrefix + pFldNm + "Val" + pParSuffix ; String fltVal = pRequestData . getParameter ( nmFldVal ) ; String nmFldOpr = fltOrdPrefix + pFldNm + "Opr" + pParSuffix ; String valFldOpr = pRequestData . getParameter ( nmFldOpr ) ; String cond = null ; if ( "isnotnull" . equals ( valFldOpr ) || "isnull" . equals ( valFldOpr ) ) { cond = pNameEntity . toUpperCase ( ) + "." + pFldNm . toUpperCase ( ) + " " + toSqlOperator ( valFldOpr ) ; } else if ( fltVal != null && valFldOpr != null && ! valFldOpr . equals ( "disabled" ) && ! valFldOpr . equals ( "" ) ) { Date valDt ; if ( fltVal . contains ( "." ) ) { // 2001-07-04T12:08:56.235
valDt = this . srvDate . fromIso8601FullNoTz ( fltVal , null ) ; } else if ( fltVal . contains ( ":" ) ) { if ( fltVal . length ( ) == 19 ) { // 2001-07-04T12:08:56
valDt = this . srvDate . fromIso8601DateTimeSecNoTz ( fltVal , null ) ; } else { // 2001-07-04T12:08
valDt = this . srvDate . fromIso8601DateTimeNoTz ( fltVal , null ) ; } } else { // 2001-07-04
valDt = this . srvDate . fromIso8601DateNoTz ( fltVal , null ) ; } cond = pNameEntity . toUpperCase ( ) + "." + pFldNm . toUpperCase ( ) + " " + toSqlOperator ( valFldOpr ) + " " + valDt . getTime ( ) ; } if ( cond != null ) { pFilterMap . put ( nmFldVal , fltVal ) ; pFilterMap . put ( nmFldOpr , valFldOpr ) ; if ( pFilterAppearance != null ) { pFilterAppearance . add ( getSrvI18n ( ) . getMsg ( pFldNm ) + " " + getSrvI18n ( ) . getMsg ( valFldOpr ) + " " + fltVal ) ; } if ( pSbWhere . toString ( ) . length ( ) == 0 ) { pSbWhere . append ( cond ) ; } else { pSbWhere . append ( " and " + cond ) ; } } |
public class DateUtil { /** * 根据特定格式格式化日期
* @ param date 被格式化的日期
* @ param format { @ link DatePrinter } 或 { @ link FastDateFormat }
* @ return 格式化后的字符串 */
public static String format ( Date date , DatePrinter format ) { } } | if ( null == format || null == date ) { return null ; } return format . format ( date ) ; |
public class AmazonElastiCacheClient { /** * Creates a new cache subnet group .
* Use this parameter only when you are creating a cluster in an Amazon Virtual Private Cloud ( Amazon VPC ) .
* @ param createCacheSubnetGroupRequest
* Represents the input of a < code > CreateCacheSubnetGroup < / code > operation .
* @ return Result of the CreateCacheSubnetGroup operation returned by the service .
* @ throws CacheSubnetGroupAlreadyExistsException
* The requested cache subnet group name is already in use by an existing cache subnet group .
* @ throws CacheSubnetGroupQuotaExceededException
* The request cannot be processed because it would exceed the allowed number of cache subnet groups .
* @ throws CacheSubnetQuotaExceededException
* The request cannot be processed because it would exceed the allowed number of subnets in a cache subnet
* group .
* @ throws InvalidSubnetException
* An invalid subnet identifier was specified .
* @ sample AmazonElastiCache . CreateCacheSubnetGroup
* @ see < a href = " http : / / docs . aws . amazon . com / goto / WebAPI / elasticache - 2015-02-02 / CreateCacheSubnetGroup "
* target = " _ top " > AWS API Documentation < / a > */
@ Override public CacheSubnetGroup createCacheSubnetGroup ( CreateCacheSubnetGroupRequest request ) { } } | request = beforeClientExecution ( request ) ; return executeCreateCacheSubnetGroup ( request ) ; |
public class IOUtil { /** * Writes text to file */
public static void writeText ( String text , File output ) { } } | PrintWriter pw = null ; try { pw = new PrintWriter ( new FileWriter ( output ) ) ; pw . write ( text ) ; } catch ( Exception e ) { throw new MockitoException ( "Problems writing text to file: " + output , e ) ; } finally { close ( pw ) ; } |
public class BlurDialogEngine { /** * Retrieve offset introduce by the navigation bar .
* @ return bottom offset due to navigation bar . */
private int getNavigationBarOffset ( ) { } } | int result = 0 ; Resources resources = mHoldingActivity . getResources ( ) ; if ( Build . VERSION . SDK_INT >= Build . VERSION_CODES . LOLLIPOP ) { int resourceId = resources . getIdentifier ( "navigation_bar_height" , "dimen" , "android" ) ; if ( resourceId > 0 ) { result = resources . getDimensionPixelSize ( resourceId ) ; } } return result ; |
public class ResourceGridScreen { /** * Process the command .
* < br / > Step 1 - Process the command if possible and return true if processed .
* < br / > Step 2 - If I can ' t process , pass to all children ( with me as the source ) .
* < br / > Step 3 - If children didn ' t process , pass to parent ( with me as the source ) .
* < br / > Note : Never pass to a parent or child that matches the source ( to avoid an endless loop ) .
* @ param strCommand The command to process .
* @ param sourceSField The source screen field ( to avoid echos ) .
* @ param iCommandOptions If this command creates a new screen , create in a new window ?
* @ return true if success . */
public boolean doCommand ( String strCommand , ScreenField sourceSField , int iCommandOptions ) { } } | if ( strCommand . equalsIgnoreCase ( MenuConstants . FORMDETAIL ) ) return ( this . onForm ( null , ScreenConstants . DETAIL_MODE , true , iCommandOptions , null ) != null ) ; else return super . doCommand ( strCommand , sourceSField , iCommandOptions ) ; |
public class OSGiInjectionScopeData { /** * Unregister this scope data with its parent if necessary for deferred
* reference data processing . */
private synchronized void disableDeferredReferenceData ( ) { } } | deferredReferenceDataEnabled = false ; if ( parent != null && deferredReferenceDatas != null ) { parent . removeDeferredReferenceData ( this ) ; deferredReferenceDatas = null ; } |
public class BasicQueryOutputProcessor { /** * { @ inheritDoc } */
public List < Map < String , Object > > toMapList ( List < QueryParameters > paramsList ) { } } | List < Map < String , Object > > result = new ArrayList < Map < String , Object > > ( ) ; Iterator < QueryParameters > iterator = paramsList . iterator ( ) ; // skipping header
if ( iterator . hasNext ( ) == true ) { iterator . next ( ) ; } while ( iterator . hasNext ( ) == true ) { result . add ( iterator . next ( ) . toMap ( ) ) ; } return result ; |
public class LostExceptionStackTrace { /** * implements the visitor to find throwing alternative exceptions from a catch block , without forwarding along the original exception */
@ Override public void sawOpcode ( int seen ) { } } | boolean markAsValid = false ; try { stack . precomputation ( this ) ; int pc = getPC ( ) ; for ( CodeException ex : exceptions ) { if ( pc == ex . getEndPC ( ) ) { if ( OpcodeUtils . isReturn ( seen ) ) { addCatchBlock ( ex . getHandlerPC ( ) , Integer . MAX_VALUE ) ; } else if ( ( seen == Const . GOTO ) || ( seen == Const . GOTO_W ) ) { addCatchBlock ( ex . getHandlerPC ( ) , this . getBranchTarget ( ) ) ; } else { addCatchBlock ( ex . getHandlerPC ( ) , Integer . MAX_VALUE ) ; } } else if ( pc == ex . getHandlerPC ( ) ) { removePreviousHandlers ( pc ) ; } } Iterator < CatchInfo > it = catchInfos . iterator ( ) ; while ( it . hasNext ( ) ) { try { CatchInfo catchInfo = it . next ( ) ; if ( pc == catchInfo . getStart ( ) ) { if ( ! updateExceptionRegister ( catchInfo , seen , pc ) ) { it . remove ( ) ; } break ; } else if ( pc > catchInfo . getFinish ( ) ) { it . remove ( ) ; break ; } else if ( ( pc > catchInfo . getStart ( ) ) && ( pc <= catchInfo . getFinish ( ) ) ) { if ( seen == Const . INVOKESPECIAL ) { if ( Values . CONSTRUCTOR . equals ( getNameConstantOperand ( ) ) ) { String className = getClassConstantOperand ( ) ; JavaClass exClass = Repository . lookupClass ( className ) ; if ( exClass . instanceOf ( throwableClass ) ) { String sig = getSigConstantOperand ( ) ; if ( ( sig . indexOf ( "Exception" ) >= 0 ) || ( sig . indexOf ( "Throwable" ) >= 0 ) || ( sig . indexOf ( "Error" ) >= 0 ) ) { markAsValid = true ; break ; } if ( exClass . instanceOf ( assertionClass ) ) { // just ignore LEST for AssertionErrors
markAsValid = true ; break ; } } } else if ( isPossibleExBuilder ( catchInfo . getRegister ( ) ) ) { markAsValid = true ; } } else if ( seen == Const . INVOKEVIRTUAL ) { String methodName = getNameConstantOperand ( ) ; if ( "initCause" . equals ( methodName ) || "addSuppressed" . equals ( methodName ) ) { if ( stack . getStackDepth ( ) > 1 ) { String className = getClassConstantOperand ( ) ; JavaClass exClass = Repository . lookupClass ( className ) ; if ( exClass . instanceOf ( throwableClass ) ) { OpcodeStack . Item itm = stack . getStackItem ( 1 ) ; int reg = itm . getRegisterNumber ( ) ; if ( reg >= 0 ) { exReg . put ( Integer . valueOf ( reg ) , Boolean . TRUE ) ; } markAsValid = true ; // Fixes javac generated code
} } } else if ( ( ( "getTargetException" . equals ( methodName ) || "getCause" . equals ( methodName ) ) && "java/lang/reflect/InvocationTargetException" . equals ( getClassConstantOperand ( ) ) ) || "java/io/UncheckedIOException" . equals ( getClassConstantOperand ( ) ) ) { markAsValid = true ; } else if ( isPossibleExBuilder ( catchInfo . getRegister ( ) ) ) { markAsValid = true ; } } else if ( ( seen == Const . INVOKEINTERFACE ) || ( seen == Const . INVOKESTATIC ) ) { if ( isPossibleExBuilder ( catchInfo . getRegister ( ) ) ) { markAsValid = true ; } } else if ( seen == Const . ATHROW ) { if ( stack . getStackDepth ( ) > 0 ) { OpcodeStack . Item itm = stack . getStackItem ( 0 ) ; if ( ( itm . getRegisterNumber ( ) != catchInfo . getRegister ( ) ) && ( itm . getUserValue ( ) == null ) ) { if ( ! isPre14Class ( itm . getJavaClass ( ) ) ) { int priority = getPrevOpcode ( 1 ) == Const . MONITOREXIT ? LOW_PRIORITY : NORMAL_PRIORITY ; bugReporter . reportBug ( new BugInstance ( this , BugType . LEST_LOST_EXCEPTION_STACK_TRACE . name ( ) , priority ) . addClass ( this ) . addMethod ( this ) . addSourceLine ( this ) ) ; } it . remove ( ) ; break ; } } if ( catchInfo . getFinish ( ) == Integer . MAX_VALUE ) { catchInfo . setFinish ( pc ) ; } } else if ( OpcodeUtils . isAStore ( seen ) ) { if ( lastWasExitPoint ) { // crazy jdk6 finally block injection - - shut
// off detection
catchInfos . clear ( ) ; break ; } if ( stack . getStackDepth ( ) > 0 ) { OpcodeStack . Item itm = stack . getStackItem ( 0 ) ; int reg = RegisterUtils . getAStoreReg ( this , seen ) ; exReg . put ( Integer . valueOf ( reg ) , ( Boolean ) itm . getUserValue ( ) ) ; if ( ( reg == catchInfo . getRegister ( ) ) && ( catchInfo . getFinish ( ) == Integer . MAX_VALUE ) ) { it . remove ( ) ; } } } else if ( OpcodeUtils . isALoad ( seen ) ) { Boolean valid = exReg . get ( Integer . valueOf ( RegisterUtils . getALoadReg ( this , seen ) ) ) ; if ( valid != null ) { markAsValid = valid . booleanValue ( ) ; } } else if ( OpcodeUtils . isReturn ( seen ) ) { removeIndeterminateHandlers ( pc ) ; break ; } } } catch ( ClassNotFoundException cnfe ) { bugReporter . reportMissingClass ( cnfe ) ; it . remove ( ) ; } } lastWasExitPoint = ( seen == Const . GOTO ) || ( seen == Const . GOTO_W ) || ( seen == Const . ATHROW ) || OpcodeUtils . isReturn ( seen ) ; } finally { TernaryPatcher . pre ( stack , seen ) ; stack . sawOpcode ( this , seen ) ; TernaryPatcher . post ( stack , seen ) ; if ( markAsValid && ( stack . getStackDepth ( ) > 0 ) ) { OpcodeStack . Item itm = stack . getStackItem ( 0 ) ; itm . setUserValue ( Boolean . TRUE ) ; } } |
public class HandlerSocketConnectorImpl { /** * ( non - Javadoc )
* @ see
* com . google . code . hs4j . network . hs . HandlerConnector # send ( com . google . code
* . hs4j . Command ) */
public void send ( final Command msg ) throws HandlerSocketException { } } | Session session = this . selectSession ( ) ; session . write ( msg ) ; |
public class DemuxingIoHandler { /** * Registers a { @ link MessageHandler } that handles the sent messages of the
* specified < code > type < / code > .
* @ return the old handler if there is already a registered handler for
* the specified < tt > type < / tt > . < tt > null < / tt > otherwise . */
@ SuppressWarnings ( "unchecked" ) public < E > MessageHandler < ? super E > addSentMessageHandler ( Class < E > type , MessageHandler < ? super E > handler ) { } } | sentMessageHandlerCache . clear ( ) ; return ( MessageHandler < ? super E > ) sentMessageHandlers . put ( type , handler ) ; |
public class Iterators { /** * Filters another iterator by eliminating duplicates . */
public static < T > Iterable < T > removeDups ( final Iterable < T > base ) { } } | return new Iterable < T > ( ) { public Iterator < T > iterator ( ) { return removeDups ( base . iterator ( ) ) ; } } ; |
public class UpdateExchangeRates { /** * Runs the example .
* @ param adManagerServices the services factory .
* @ param session the session .
* @ param exchangeRateId the ID of the exchange rate to update .
* @ throws ApiException if the API request failed with one or more service errors .
* @ throws RemoteException if the API request failed due to other errors . */
public static void runExample ( AdManagerServices adManagerServices , AdManagerSession session , long exchangeRateId ) throws RemoteException { } } | // Get the ExchangeRateService .
ExchangeRateServiceInterface exchangeRateService = adManagerServices . get ( session , ExchangeRateServiceInterface . class ) ; // Create a statement to only select a single exchange rate by ID .
StatementBuilder statementBuilder = new StatementBuilder ( ) . where ( "id = :id and refreshRate = :refreshRate" ) . orderBy ( "id ASC" ) . limit ( 1 ) . withBindVariableValue ( "id" , exchangeRateId ) . withBindVariableValue ( "refreshRate" , ExchangeRateRefreshRate . FIXED ) ; // Get the exchange rate .
ExchangeRatePage page = exchangeRateService . getExchangeRatesByStatement ( statementBuilder . toStatement ( ) ) ; ExchangeRate exchangeRate = Iterables . getOnlyElement ( Arrays . asList ( page . getResults ( ) ) ) ; // Update the exchange rate value to 1.5.
exchangeRate . setExchangeRate ( 15000000000L ) ; // Update the exchange rate on the server .
ExchangeRate [ ] exchangeRates = exchangeRateService . updateExchangeRates ( new ExchangeRate [ ] { exchangeRate } ) ; for ( ExchangeRate updatedExchangeRate : exchangeRates ) { System . out . printf ( "Exchange rate with ID %d, currency code '%s'," + " direction '%s', and exchange rate %.2f was updated.%n" , updatedExchangeRate . getId ( ) , updatedExchangeRate . getCurrencyCode ( ) , updatedExchangeRate . getDirection ( ) . getValue ( ) , ( updatedExchangeRate . getExchangeRate ( ) / 10000000000f ) ) ; } |
public class Log { /** * Send a { @ link Constants # WARN } log message .
* @ param msg
* The message you would like logged . */
public static int w ( String msg ) { } } | // This is a quick check to avoid the expensive stack trace reflection .
if ( ! activated ) { return 0 ; } String caller = LogHelper . getCaller ( ) ; if ( caller != null ) { return w ( caller , msg ) ; } return 0 ; |
public class BCFRecordReader { /** * For compressed BCF , unless the end has been reached , this is quite
* inaccurate . */
@ Override public float getProgress ( ) { } } | if ( length == 0 ) return 1 ; if ( ! isBGZF ) return ( float ) ( in . getPosition ( ) - fileStart ) / length ; try { if ( in . peek ( ) == - 1 ) return 1 ; } catch ( IOException e ) { return 1 ; } // Add 1 to the denominator to make sure that we never report 1 here .
return ( float ) ( ( bci . getFilePointer ( ) >>> 16 ) - fileStart ) / ( length + 1 ) ; |
public class EditShape {
    /**
     * Opens or closes the given path by relinking its vertex list and
     * updating the path's closed-path flag. Always succeeds.
     *
     * @param path index of the path to modify
     * @param b_yes_no true to close the path, false to open it
     */
    void setClosedPath(int path, boolean b_yes_no) {
        // Nothing to do if the path is already in the requested state.
        if (isClosedPath(path) == b_yes_no)
            return;
        if (getPathSize(path) > 0) {
            int first = getFirstVertex(path);
            int last = getLastVertex(path);
            if (b_yes_no) {
                // make a circular list: link last -> first and first -> last
                setNextVertex_(last, first);
                setPrevVertex_(first, last);
                // set segment to NULL (just in case) — the closing edge has
                // no explicit segment attached
                int vindex = getVertexIndex(last);
                setSegmentToIndex_(vindex, null);
            } else {
                // Break the cycle: terminate the list at both ends.
                setNextVertex_(last, -1);
                setPrevVertex_(first, -1);
                int vindex = getVertexIndex(last);
                setSegmentToIndex_(vindex, null);
            }
        }
        int oldflags = getPathFlags_(path);
        // clear the bit; (set it, then subtract it — leaves all other flags intact)
        int flags = (oldflags | (int) PathFlags_.closedPath) - (int) PathFlags_.closedPath;
        // Re-set the closed-path bit only when closing.
        setPathFlags_(path, flags | (b_yes_no ? (int) PathFlags_.closedPath : 0));
    }
}
public class cachepolicy { /** * Use this API to add cachepolicy . */
public static base_response add ( nitro_service client , cachepolicy resource ) throws Exception { } } | cachepolicy addresource = new cachepolicy ( ) ; addresource . policyname = resource . policyname ; addresource . rule = resource . rule ; addresource . action = resource . action ; addresource . storeingroup = resource . storeingroup ; addresource . invalgroups = resource . invalgroups ; addresource . invalobjects = resource . invalobjects ; addresource . undefaction = resource . undefaction ; return addresource . add_resource ( client ) ; |
public class ClosureBundler { /** * Append the contents of the string to the supplied appendable . */
public void appendTo ( Appendable out , DependencyInfo info , String content ) throws IOException { } } | appendTo ( out , info , CharSource . wrap ( content ) ) ; |
public class ObjectEnvelope { /** * Sets the initial MoificationState of the wrapped object myObj . The initial state will be StateNewDirty if myObj
* is not persisten already . The state will be set to StateOldClean if the object is already persistent . */
private void prepareInitialState ( boolean isNewObject ) { } } | // determine appropriate modification state
ModificationState initialState ; if ( isNewObject ) { // if object is not already persistent it must be marked as new
// it must be marked as dirty because it must be stored even if it will not modified during tx
initialState = StateNewDirty . getInstance ( ) ; } else if ( isDeleted ( oid ) ) { // if object is already persistent it will be marked as old .
// it is marked as dirty as it has been deleted during tx and now it is inserted again ,
// possibly with new field values .
initialState = StateOldDirty . getInstance ( ) ; } else { // if object is already persistent it will be marked as old .
// it is marked as clean as it has not been modified during tx already
initialState = StateOldClean . getInstance ( ) ; } // remember it :
modificationState = initialState ; |
public class GetApiMappingsResult { /** * The elements from this collection .
* < b > NOTE : < / b > This method appends the values to the existing list ( if any ) . Use
* { @ link # setItems ( java . util . Collection ) } or { @ link # withItems ( java . util . Collection ) } if you want to override the
* existing values .
* @ param items
* The elements from this collection .
* @ return Returns a reference to this object so that method calls can be chained together . */
public GetApiMappingsResult withItems ( ApiMapping ... items ) { } } | if ( this . items == null ) { setItems ( new java . util . ArrayList < ApiMapping > ( items . length ) ) ; } for ( ApiMapping ele : items ) { this . items . add ( ele ) ; } return this ; |
public class TensorInfo { /** * < code > optional . tensorflow . DataType dtype = 2 ; < / code > */
public org . tensorflow . framework . DataType getDtype ( ) { } } | org . tensorflow . framework . DataType result = org . tensorflow . framework . DataType . valueOf ( dtype_ ) ; return result == null ? org . tensorflow . framework . DataType . UNRECOGNIZED : result ; |
public class MapPrinter { /** * Return the available format ids . */
public final Set < String > getOutputFormatsNames ( ) { } } | SortedSet < String > formats = new TreeSet < > ( ) ; for ( String formatBeanName : this . outputFormat . keySet ( ) ) { int endingIndex = formatBeanName . indexOf ( MAP_OUTPUT_FORMAT_BEAN_NAME_ENDING ) ; if ( endingIndex < 0 ) { endingIndex = formatBeanName . indexOf ( OUTPUT_FORMAT_BEAN_NAME_ENDING ) ; } formats . add ( formatBeanName . substring ( 0 , endingIndex ) ) ; } return formats ; |
public class ApiOvhEmaildomain { /** * Create new rule for filter
* REST : POST / email / domain / { domain } / account / { accountName } / filter / { name } / rule
* @ param value [ required ] Rule parameter of filter
* @ param operand [ required ] Rule of filter
* @ param header [ required ] Header to be filtered
* @ param domain [ required ] Name of your domain name
* @ param accountName [ required ] Name of account
* @ param name [ required ] Filter name */
public OvhTaskFilter domain_account_accountName_filter_name_rule_POST ( String domain , String accountName , String name , String header , OvhDomainFilterOperandEnum operand , String value ) throws IOException { } } | String qPath = "/email/domain/{domain}/account/{accountName}/filter/{name}/rule" ; StringBuilder sb = path ( qPath , domain , accountName , name ) ; HashMap < String , Object > o = new HashMap < String , Object > ( ) ; addBody ( o , "header" , header ) ; addBody ( o , "operand" , operand ) ; addBody ( o , "value" , value ) ; String resp = exec ( qPath , "POST" , sb . toString ( ) , o ) ; return convertTo ( resp , OvhTaskFilter . class ) ; |
public class KnowledgeBaseManagerSparql { /** * This method checks a set of fetching tasks that are pending to validate their adequate conclusion .
* For each of the fetching tasks launched we will track the models that were not adequately loaded for ulterior
* uploading .
* @ param concurrentTasks the Map with the models to fetch and the Future providing the state of the uploading
* @ return True if all the models were adequately uploaded . False otherwise . */
private boolean checkFetchingTasks ( Map < URI , Future < Boolean > > concurrentTasks ) { } } | boolean result = true ; Boolean fetched ; for ( URI modelUri : concurrentTasks . keySet ( ) ) { Future < Boolean > f = concurrentTasks . get ( modelUri ) ; try { fetched = f . get ( ) ; result = result && fetched ; // Track unreachability
if ( fetched && this . unreachableModels . containsKey ( modelUri ) ) { this . unreachableModels . remove ( modelUri ) ; log . info ( "A previously unreachable model has finally been obtained - {}" , modelUri ) ; } if ( ! fetched ) { this . unreachableModels . put ( modelUri , new Date ( ) ) ; log . error ( "Cannot load " + modelUri + ". Marked as invalid" ) ; } } catch ( Exception e ) { // Mark as invalid
log . error ( "There was an error while trying to fetch a remote model" , e ) ; this . unreachableModels . put ( modelUri , new Date ( ) ) ; log . info ( "Added {} to the unreachable models list." , modelUri ) ; result = false ; } } return result ; |
public class ParsePackage {
    /**
     * Parses a formula: a single action expression enclosed in braces,
     * preceded by the common rule data.
     *
     * @param textProvider the provider of the rules text being parsed
     * @return the parsed {@code Formula} rule
     * @throws ParserException if the surrounding braces are missing or the
     *         action expression cannot be parsed
     */
    private AbstractRule processFormula(RulesTextProvider textProvider) {
        log.debug("processFormula");
        Formula rule = new Formula();
        // Remember the start position so the TOC entry spans the whole formula.
        int start = textProvider.getPos();
        LoadCommonRuleData(rule, textProvider);
        textProvider.addTOCElement(null, rule.getDescription(), start, textProvider.getPos(), TYPE_FORMULA);
        // The single action expression must be wrapped in braces: { <action> }
        exactOrError("{", textProvider);
        Expression expression = processAction(textProvider);
        exactOrError("}", textProvider);
        rule.addAction(expression);
        return rule;
    }
}
public class AuthCallsIpAccessControlListMappingReader {
    /**
     * Make the request to the Twilio API to perform the read.
     *
     * @param client TwilioRestClient with which to make the request
     * @return AuthCallsIpAccessControlListMapping ResourceSet
     */
    @Override
    public ResourceSet<AuthCallsIpAccessControlListMapping> read(final TwilioRestClient client) {
        // Only the first page is fetched here; NOTE(review): subsequent pages
        // are presumably fetched by ResourceSet during iteration — confirm.
        return new ResourceSet<>(this, client, firstPage(client));
    }
}
public class ImageLoader {
    /**
     * Loads and decodes an image synchronously.<br/>
     * Default display image options
     * {@linkplain ImageLoaderConfiguration.Builder#defaultDisplayImageOptions(DisplayImageOptions)
     * from configuration} will be used.<br/>
     * <b>NOTE:</b> {@link #init(ImageLoaderConfiguration)} method must be called before this method call.
     *
     * @param uri Image URI (i.e. "http://site.com/image.png", "file:///mnt/sdcard/image.png")
     * @param targetImageSize Minimal size for {@link Bitmap} which will be returned. Downloaded image will be
     *        decoded and scaled to {@link Bitmap} of the size which is <b>equal or larger</b> (usually a bit
     *        larger) than incoming targetImageSize.
     * @return Result image Bitmap. Can be <b>null</b> if image loading/decoding failed or was cancelled.
     * @throws IllegalStateException if {@link #init(ImageLoaderConfiguration)} method wasn't called before
     */
    public Bitmap loadImageSync(String uri, ImageSize targetImageSize) {
        // null display options -> the three-argument overload falls back to
        // the defaults from the configuration.
        return loadImageSync(uri, targetImageSize, null);
    }
}
public class DeleteInvitationsResult { /** * A list of account ID and email address pairs of the AWS accounts that could not be processed .
* @ param unprocessedAccounts
* A list of account ID and email address pairs of the AWS accounts that could not be processed . */
public void setUnprocessedAccounts ( java . util . Collection < Result > unprocessedAccounts ) { } } | if ( unprocessedAccounts == null ) { this . unprocessedAccounts = null ; return ; } this . unprocessedAccounts = new java . util . ArrayList < Result > ( unprocessedAccounts ) ; |
public class SysUtil {
    /**
     * Determine how preferred a given ABI is on this system.
     *
     * @param supportedAbis ABIs on this system, most-preferred first
     * @param abi ABI of a shared library we might want to unpack
     * @return -1 if not supported, otherwise an integer index, smaller being more preferred
     */
    public static int findAbiScore(String[] supportedAbis, String abi) {
        int score = 0;
        for (String candidate : supportedAbis) {
            // Skip null slots; the position in the array is the score.
            if (candidate != null && abi.equals(candidate)) {
                return score;
            }
            ++score;
        }
        return -1;
    }
}
public class UriEscape {
    /**
     * Perform a URI fragment identifier <strong>escape</strong> operation on a
     * <tt>Reader</tt> input using <tt>UTF-8</tt> as encoding, writing results
     * to a <tt>Writer</tt>.
     * <p>
     * The following are the only allowed chars in a URI fragment identifier
     * (will not be escaped):
     * <ul>
     *   <li><tt>A-Z a-z 0-9</tt></li>
     *   <li><tt>- . _ ~</tt></li>
     *   <li><tt>! $ &amp; ' ( ) * + , ; =</tt></li>
     *   <li><tt>: @</tt></li>
     *   <li><tt>/ ?</tt></li>
     * </ul>
     * All other chars will be escaped by converting them to the sequence of
     * bytes that represents them in <tt>UTF-8</tt>, then representing each
     * byte in <tt>%HH</tt> syntax, <tt>HH</tt> being the hexadecimal
     * representation of the byte.
     * <p>
     * This method is <strong>thread-safe</strong>.
     *
     * @param reader the <tt>Reader</tt> reading the text to be escaped.
     * @param writer the <tt>java.io.Writer</tt> to which the escaped result will be written.
     *        Nothing will be written at all to this writer if input is <tt>null</tt>.
     * @throws IOException if an input/output exception occurs
     * @since 1.1.2
     */
    public static void escapeUriFragmentId(final Reader reader, final Writer writer) throws IOException {
        // Delegate to the overload that takes an explicit charset, using UTF-8.
        escapeUriFragmentId(reader, writer, DEFAULT_ENCODING);
    }
}
public class VFSUtils {
    /**
     * Adds the manifest Class-Path entries of the given file (and, recursively,
     * of each referenced jar) to the list of paths.
     *
     * @param file the file whose manifest is examined
     * @param paths the paths to add to
     * @throws IOException if there is an error reading the manifest or the virtual file is closed
     * @throws IllegalStateException if the file has no parent
     * @throws IllegalArgumentException for a null file or paths
     */
    public static void addManifestLocations(VirtualFile file, List<VirtualFile> paths) throws IOException {
        if (file == null) {
            throw MESSAGES.nullArgument("file");
        }
        if (paths == null) {
            throw MESSAGES.nullArgument("paths");
        }
        // Cache the trace flag; it is checked on several branches below.
        boolean trace = VFSLogger.ROOT_LOGGER.isTraceEnabled();
        Manifest manifest = getManifest(file);
        if (manifest == null) {
            return;
        }
        Attributes mainAttributes = manifest.getMainAttributes();
        String classPath = mainAttributes.getValue(Attributes.Name.CLASS_PATH);
        if (classPath == null) {
            if (trace) {
                VFSLogger.ROOT_LOGGER.tracef("Manifest has no Class-Path for %s", file.getPathName());
            }
            return;
        }
        // Class-Path entries are resolved relative to the file's parent.
        VirtualFile parent = file.getParent();
        if (parent == null) {
            VFSLogger.ROOT_LOGGER.debugf("%s has no parent.", file);
            return;
        }
        if (trace) {
            VFSLogger.ROOT_LOGGER.tracef("Parsing Class-Path: %s for %s parent=%s", classPath, file.getName(), parent.getName());
        }
        // Class-Path is a whitespace-separated list of relative entries.
        StringTokenizer tokenizer = new StringTokenizer(classPath);
        while (tokenizer.hasMoreTokens()) {
            String path = tokenizer.nextToken();
            try {
                VirtualFile vf = parent.getChild(path);
                if (vf.exists()) {
                    if (paths.contains(vf) == false) {
                        paths.add(vf);
                        // Recursively process the jar
                        Automounter.mount(file, vf);
                        addManifestLocations(vf, paths);
                    } else if (trace) {
                        VFSLogger.ROOT_LOGGER.tracef("%s from manifest is already in the classpath %s", vf.getName(), paths);
                    }
                } else if (trace) {
                    VFSLogger.ROOT_LOGGER.trace("Unable to find " + path + " from " + parent.getName());
                }
            } catch (IOException e) {
                // A bad entry is skipped rather than failing the whole scan.
                VFSLogger.ROOT_LOGGER.debugf("Manifest Class-Path entry %s ignored for %s reason= %s", path, file.getPathName(), e);
            }
        }
    }
}
public class ClassBuilder {
    /**
     * Handles the {@literal <TypeElement>} tag: builds the documentation page
     * for the current type (class, interface or enum).
     *
     * @param node the XML element that specifies which components to document
     * @param contentTree the content tree to which the documentation will be added
     * @throws DocletException if there is a problem while building the documentation
     */
    public void buildClassDoc(XMLNode node, Content contentTree) throws DocletException {
        // Pick the header label resource key by the kind of type element.
        String key;
        if (isInterface) {
            key = "doclet.Interface";
        } else if (isEnum) {
            key = "doclet.Enum";
        } else {
            key = "doclet.Class";
        }
        // Note: the parameter is deliberately replaced with a fresh header tree.
        contentTree = writer.getHeader(configuration.getText(key) + " " + utils.getSimpleName(typeElement));
        Content classContentTree = writer.getClassContentHeader();
        buildChildren(node, classContentTree);
        writer.addClassContentTree(contentTree, classContentTree);
        writer.addFooter(contentTree);
        writer.printDocument(contentTree);
        // Copy doc-files resources alongside the generated page.
        copyDocFiles();
    }
}
public class XDataManager { /** * Get the XDataManager for the given XMPP connection .
* @ param connection the XMPPConnection .
* @ return the XDataManager */
public static synchronized XDataManager getInstanceFor ( XMPPConnection connection ) { } } | XDataManager xDataManager = INSTANCES . get ( connection ) ; if ( xDataManager == null ) { xDataManager = new XDataManager ( connection ) ; INSTANCES . put ( connection , xDataManager ) ; } return xDataManager ; |
public class AbstractJobLauncher { /** * Takes a { @ link List } of { @ link Tag } s and returns a new { @ link List } with the original { @ link Tag } s as well as any
* additional { @ link Tag } s returned by { @ link ClusterNameTags # getClusterNameTags ( ) } .
* @ see ClusterNameTags */
private static List < Tag < ? > > addClusterNameTags ( List < ? extends Tag < ? > > tags ) { } } | return ImmutableList . < Tag < ? > > builder ( ) . addAll ( tags ) . addAll ( Tag . fromMap ( ClusterNameTags . getClusterNameTags ( ) ) ) . build ( ) ; |
public class ReduceOps {
    /**
     * Constructs a {@code TerminalOp} that implements a mutable reduce on
     * {@code long} values.
     *
     * @param <R> the type of the result
     * @param supplier a factory to produce a new accumulator of the result type
     * @param accumulator a function to incorporate a long into an accumulator
     * @param combiner a function to combine an accumulator into another
     * @return a {@code TerminalOp} implementing the reduction
     */
    public static <R> TerminalOp<Long, R> makeLong(Supplier<R> supplier, ObjLongConsumer<R> accumulator, BinaryOperator<R> combiner) {
        Objects.requireNonNull(supplier);
        Objects.requireNonNull(accumulator);
        Objects.requireNonNull(combiner);
        // Sink that holds the mutable accumulation state (in Box.state) and
        // feeds each long element into it.
        class ReducingSink extends Box<R> implements AccumulatingSink<Long, R, ReducingSink>, Sink.OfLong {
            @Override
            public void begin(long size) {
                // Fresh accumulator per (sub)stream traversal.
                state = supplier.get();
            }

            @Override
            public void accept(long t) {
                accumulator.accept(state, t);
            }

            @Override
            public void combine(ReducingSink other) {
                // Merge a parallel sibling's partial result into this one.
                state = combiner.apply(state, other.state);
            }
        }
        return new ReduceOp<Long, R, ReducingSink>(StreamShape.LONG_VALUE) {
            @Override
            public ReducingSink makeSink() {
                return new ReducingSink();
            }
        };
    }
}
public class DeletePlacementGroupRequest { /** * This method is intended for internal use only . Returns the marshaled request configured with additional
* parameters to enable operation dry - run . */
@ Override public Request < DeletePlacementGroupRequest > getDryRunRequest ( ) { } } | Request < DeletePlacementGroupRequest > request = new DeletePlacementGroupRequestMarshaller ( ) . marshall ( this ) ; request . addParameter ( "DryRun" , Boolean . toString ( true ) ) ; return request ; |
public class OAuthProfileCreator { /** * Retrieve the user profile from the access token .
* @ param context the web context
* @ param accessToken the access token
* @ return the user profile */
protected Optional < UserProfile > retrieveUserProfileFromToken ( final WebContext context , final T accessToken ) { } } | final OAuthProfileDefinition < U , T , O > profileDefinition = configuration . getProfileDefinition ( ) ; final String profileUrl = profileDefinition . getProfileUrl ( accessToken , configuration ) ; final S service = this . configuration . buildService ( context , client , null ) ; final String body = sendRequestForData ( service , accessToken , profileUrl , profileDefinition . getProfileVerb ( ) ) ; logger . info ( "UserProfile: " + body ) ; if ( body == null ) { throw new HttpCommunicationException ( "No data found for accessToken: " + accessToken ) ; } final U profile = ( U ) configuration . getProfileDefinition ( ) . extractUserProfile ( body ) ; addAccessTokenToProfile ( profile , accessToken ) ; return Optional . of ( profile ) ; |
public class PollCachingJdbcRegistry {
    /**
     * Checks the DB store to see if the 'dataVersion' entry has been updated
     * with a newer version number. If it has, the local cache is invalidated.
     */
    protected void checkCacheVersion() {
        // Be very aggressive in invalidating the cache: default to
        // invalidation, and only skip it when the versions provably match.
        boolean invalidate = true;
        QueryRunner run = new QueryRunner(ds);
        try {
            long latestVersion = run.query("SELECT version FROM gw_dataversion", Handlers.LONG_HANDLER); //$NON-NLS-1$
            if (latestVersion > -1 && dataVersion > -1 && latestVersion == dataVersion) {
                // Versions match -> the cache is still current.
                invalidate = false;
            } else {
                // Remember the newer version for the next check.
                dataVersion = latestVersion;
            }
        } catch (SQLException e) {
            // TODO need to use the gateway logger to log this!
            // On DB failure we fall through with invalidate == true (safe default).
            e.printStackTrace();
        }
        if (invalidate) {
            invalidateCache();
        }
    }
}
public class CommonHelper { /** * Takes a map of salt node IDs to a value and return a new map that uses the
* SNodes as keys instead of the IDs .
* @ param < V >
* @ param map
* @ param graph
* @ return */
public static < V > Map < SNode , V > createSNodeMapFromIDs ( Map < String , V > map , SDocumentGraph graph ) { } } | HashMap < SNode , V > result = new LinkedHashMap < > ( ) ; if ( map != null && graph != null ) { for ( Map . Entry < String , V > e : map . entrySet ( ) ) { SNode n = graph . getNode ( e . getKey ( ) ) ; if ( n != null ) { result . put ( n , e . getValue ( ) ) ; } } } return result ; |
public class WSKeyStore {
    /**
     * Set the physical location of this store to the input value, resolving all
     * symbolic names in the location. If location is just a file name, its
     * location is assumed to be LibertyConstants.DEFAULT_CONFIG_LOCATION, then
     * LibertyConstants.DEFAULT_OUTPUT_LOCATION as a final fallback.
     *
     * @param _location the configured (possibly symbolic or relative) location
     */
    private void setLocation(String _location) {
        String res = null;
        File resFile = null;
        boolean relativePath = true;
        boolean defaultPath = false;
        // try as "absolute" resource (contains symbol, or absolute path)
        try {
            res = cfgSvc.resolveString(_location);
            resFile = new File(res);
            relativePath = !resFile.isAbsolute();
        } catch (IllegalStateException e) {
            // ignore — resolution failure just means we try the fallbacks below
        }
        if (resFile == null || (!resFile.isFile() && relativePath)) {
            // look for resource in server config location
            try {
                res = cfgSvc.resolveString(LibertyConstants.DEFAULT_CONFIG_LOCATION + _location);
                resFile = new File(res);
            } catch (IllegalStateException e) {
                // ignore
            }
            if (resFile == null || !resFile.isFile()) {
                // fall back to creating for resource in shared output location
                try {
                    res = cfgSvc.resolveString(LibertyConstants.DEFAULT_OUTPUT_LOCATION + _location);
                    resFile = new File(res);
                    defaultPath = true;
                } catch (IllegalStateException e) {
                    // ignore
                }
            }
        }
        // Work against the symbol in the original location.
        // A default keystore resolved into the output directory must be
        // initialized at server startup.
        if (isDefault && (defaultPath || locationInOutputDir(_location))) {
            this.initializeAtStartup = true;
        }
        // reset location w/ resolved value
        // isDefault tested because the default path's file may not exist (and that's OK)
        if ((res != null && resFile.isFile()) || isDefault) {
            this.location = res;
            if (TraceComponent.isAnyTracingEnabled() && tc.isDebugEnabled()) {
                Tr.debug(tc, "Found store under [" + location + "]");
            }
        } else {
            // If it wasn't found then it's likely going to trigger the
            // load.error later. Issue a warning to explain the file could
            // not be found.
            Tr.warning(tc, "ssl.keystore.not.found.warning", res, name);
        }
    }
}
public class TimeMachine {
    /**
     * Transforms a Runnable into a Block, so that only one implementation of
     * the time-traveling code is needed.
     *
     * @param code the Runnable to be converted
     * @return a Block that, when called, executes the given Runnable and
     *         returns null
     */
    private Block<Void> toBlock(final Runnable code) {
        // Adapter: run the Runnable, then satisfy Block's Void return.
        return new Block<Void>() {
            public Void run() {
                code.run();
                return null;
            }
        };
    }
}
public class OTMJCAManagedConnectionFactory {
    /**
     * Returns a new managed connection. This connection is wrapped around the
     * real connection and delegates to it to get work done.
     *
     * @param subject the security subject (unused here)
     * @param info expected to be an OTMConnectionRequestInfo carrying the PBKey
     * @return the wrapping managed connection
     */
    public ManagedConnection createManagedConnection(Subject subject, ConnectionRequestInfo info) {
        Util.log("In OTMJCAManagedConnectionFactory.createManagedConnection");
        try {
            Kit kit = getKit();
            // NOTE(review): unchecked cast — a foreign ConnectionRequestInfo
            // implementation would throw ClassCastException here.
            PBKey key = ((OTMConnectionRequestInfo) info).getPbKey();
            OTMConnection connection = kit.acquireConnection(key);
            return new OTMJCAManagedConnection(this, connection, key);
        } catch (ResourceException e) {
            // NOTE(review): only the message is propagated; the original cause
            // and stack trace are lost — consider chaining e if the exception
            // type supports it.
            throw new OTMConnectionRuntimeException(e.getMessage());
        }
    }
}
public class DiSH {
    /**
     * Returns the parent of the specified cluster, i.e. the cluster with the
     * largest preference-vector cardinality strictly smaller than the child's
     * whose subspace contains the child's and whose centroid is close enough.
     *
     * @param relation the relation storing the objects
     * @param child the child to search the parent for
     * @param clustersMap the map containing the clusters
     * @return the parent of the specified cluster, or null if none qualifies
     */
    private Pair<long[], ArrayModifiableDBIDs> findParent(Relation<V> relation, Pair<long[], ArrayModifiableDBIDs> child, Object2ObjectMap<long[], List<ArrayModifiableDBIDs>> clustersMap) {
        Centroid child_centroid = ProjectedCentroid.make(child.first, relation, child.second);
        Pair<long[], ArrayModifiableDBIDs> result = null;
        int resultCardinality = -1;
        long[] childPV = child.first;
        int childCardinality = BitsUtil.cardinality(childPV);
        for (long[] parentPV : clustersMap.keySet()) {
            int parentCardinality = BitsUtil.cardinality(parentPV);
            // A parent must span fewer dimensions than the child; among
            // candidates, prefer the one with the largest cardinality.
            if (parentCardinality >= childCardinality || (resultCardinality != -1 && parentCardinality <= resultCardinality)) {
                continue;
            }
            // The parent's preference vector must be a subset of the child's.
            long[] pv = BitsUtil.andCMin(childPV, parentPV);
            if (BitsUtil.equal(pv, parentPV)) {
                List<ArrayModifiableDBIDs> parentList = clustersMap.get(parentPV);
                for (ArrayModifiableDBIDs parent : parentList) {
                    NumberVector parent_centroid = ProjectedCentroid.make(parentPV, relation, parent);
                    // Accept the first cluster whose projected centroid lies
                    // within 2*epsilon of the child's centroid.
                    double d = weightedDistance(child_centroid, parent_centroid, parentPV);
                    if (d <= 2 * epsilon) {
                        result = new Pair<>(parentPV, parent);
                        resultCardinality = parentCardinality;
                        break;
                    }
                }
            }
        }
        return result;
    }
}
public class DisplacedLognormalGARCH {
    /**
     * Calibrates the displaced-lognormal GARCH(1,1) parameters
     * (omega, alpha, beta, displacement) by maximum likelihood and returns
     * the best parameters together with derived quantities (scenarios,
     * likelihood, vol, quantile predictions).
     *
     * @param guess optional initial guess with keys "Omega", "Alpha", "Beta",
     *        "Displacement"; may be null. NOTE(review): if a non-null guess is
     *        missing one of these keys, unboxing the null Double throws NPE —
     *        confirm callers always supply all four.
     * @see net.finmath.timeseries.HistoricalSimulationModel#getBestParameters(java.util.Map)
     */
    @Override
    public Map<String, Object> getBestParameters(Map<String, Object> guess) {
        // Create the objective function for the solver
        class GARCHMaxLikelihoodFunction implements MultivariateFunction, Serializable {
            private static final long serialVersionUID = 7072187082052755854L;

            @Override
            public double value(double[] variables) {
                /*
                 * Transform variables: The solver variables are in (-\infty, \infty).
                 * We transform the variable to the admissible domain for GARCH, that is
                 * omega > 0, 0 < alpha < 1, 0 < beta < (1-alpha), displacement > lowerBoundDisplacement.
                 * Usually for GARCH the restrictions are written like
                 * omega > 0, alpha > 0, beta > 0, and alpha + beta < 1.
                 */
                double omega = Math.exp(variables[0]);
                // Double-exponential map keeps mucorr and muema in (0, 1).
                double mucorr = Math.exp(-Math.exp(-variables[1]));
                double muema = Math.exp(-Math.exp(-variables[2]));
                double beta = mucorr * muema;
                double alpha = mucorr - beta;
                // double alpha = 1.0 / (1.0 + Math.exp(-variables[1]));
                // double beta = (1.0 - alpha) * 1.0 / (1.0 + Math.exp(-variables[2]));
                // Logistic map to (0, 1), then rescale to the displacement bounds.
                double displacementNormed = 1.0 / (1.0 + Math.exp(-variables[3]));
                double displacement = (upperBoundDisplacement - lowerBoundDisplacement) * displacementNormed + lowerBoundDisplacement;
                double logLikelihood = getLogLikelihoodForParameters(omega, alpha, beta, displacement);
                // Penalty to prevent solver from hitting the bounds
                logLikelihood -= Math.max(1E-30 - omega, 0) / 1E-30;
                logLikelihood -= Math.max(1E-30 - alpha, 0) / 1E-30;
                logLikelihood -= Math.max((alpha - 1) + 1E-30, 0) / 1E-30;
                logLikelihood -= Math.max(1E-30 - beta, 0) / 1E-30;
                logLikelihood -= Math.max((beta - 1) + 1E-30, 0) / 1E-30;
                logLikelihood -= Math.max(1E-30 - displacementNormed, 0) / 1E-30;
                logLikelihood -= Math.max((displacementNormed - 1) + 1E-30, 0) / 1E-30;
                return logLikelihood;
            }
        }
        final GARCHMaxLikelihoodFunction objectiveFunction = new GARCHMaxLikelihoodFunction();
        // Create a guess for the solver
        double guessOmega = 1.0;
        double guessAlpha = 0.2;
        double guessBeta = 0.2;
        double guessDisplacement = (lowerBoundDisplacement + upperBoundDisplacement) / 2.0;
        if (guess != null) {
            // A guess was provided, use that one
            guessOmega = (Double) guess.get("Omega");
            guessAlpha = (Double) guess.get("Alpha");
            guessBeta = (Double) guess.get("Beta");
            guessDisplacement = (Double) guess.get("Displacement");
        }
        // Constrain guess to admissible range
        guessOmega = restrictToOpenSet(guessOmega, 0.0, Double.MAX_VALUE);
        guessAlpha = restrictToOpenSet(guessAlpha, 0.0, 1.0);
        guessBeta = restrictToOpenSet(guessBeta, 0.0, 1.0 - guessAlpha);
        guessDisplacement = restrictToOpenSet(guessDisplacement, lowerBoundDisplacement, upperBoundDisplacement);
        double guessMucorr = guessAlpha + guessBeta;
        double guessMuema = guessBeta / (guessAlpha + guessBeta);
        // Transform guess to solver coordinates (inverse of the maps above).
        double[] guessParameters = new double[4];
        guessParameters[0] = Math.log(guessOmega);
        guessParameters[1] = -Math.log(-Math.log(guessMucorr));
        guessParameters[2] = -Math.log(-Math.log(guessMuema));
        guessParameters[3] = -Math.log(1.0 / ((guessDisplacement - lowerBoundDisplacement) / (upperBoundDisplacement - lowerBoundDisplacement)) - 1.0);
        // Seek optimal parameter configuration
        Optimizer lm = new LevenbergMarquardt(guessParameters, new double[] { 1000 }, 10 * maxIterations, 2) {
            private static final long serialVersionUID = 8030873619034187741L;

            @Override
            public void setValues(double[] arg0, double[] arg1) {
                arg1[0] = objectiveFunction.value(arg0);
            }
        };
        double[] bestParameters = null;
        // isUseLM is hard-wired to false: the CMA-ES branch below is the one
        // actually exercised; the LM solver is kept for experimentation.
        boolean isUseLM = false;
        if (isUseLM) {
            try {
                lm.run();
            } catch (SolverException e1) {
                // TODO Auto-generated catch block
                e1.printStackTrace();
            }
            bestParameters = lm.getBestFitParameters();
        } else {
            org.apache.commons.math3.optimization.direct.CMAESOptimizer optimizer2 = new org.apache.commons.math3.optimization.direct.CMAESOptimizer();
            try {
                PointValuePair result = optimizer2.optimize(maxIterations, objectiveFunction, GoalType.MAXIMIZE, guessParameters);
                bestParameters = result.getPoint();
            } catch (org.apache.commons.math3.exception.MathIllegalStateException e) {
                // Solver did not converge: fall back to the initial guess.
                System.out.println("Solver failed");
                bestParameters = guessParameters;
            }
        }
        // Transform parameters to GARCH parameters (same maps as in value()).
        double omega = Math.exp(bestParameters[0]);
        double mucorr = Math.exp(-Math.exp(-bestParameters[1]));
        double muema = Math.exp(-Math.exp(-bestParameters[2]));
        double beta = mucorr * muema;
        double alpha = mucorr - beta;
        double displacementNormed = 1.0 / (1.0 + Math.exp(-bestParameters[3]));
        double displacement = (upperBoundDisplacement - lowerBoundDisplacement) * displacementNormed + lowerBoundDisplacement;
        double[] quantiles = { 0.01, 0.05, 0.5 };
        double[] quantileValues = getQuantilPredictionsForParameters(omega, alpha, beta, displacement, quantiles);
        Map<String, Object> results = new HashMap<>();
        results.put("Omega", omega);
        results.put("Alpha", alpha);
        results.put("Beta", beta);
        results.put("Displacement", displacement);
        results.put("Szenarios", this.getSzenarios(omega, alpha, beta, displacement));
        results.put("Likelihood", this.getLogLikelihoodForParameters(omega, alpha, beta, displacement));
        results.put("Vol", Math.sqrt(this.getLastResidualForParameters(omega, alpha, beta, displacement)));
        results.put("Quantile=1%", quantileValues[0]);
        results.put("Quantile=5%", quantileValues[1]);
        results.put("Quantile=50%", quantileValues[2]);
        // System.out.println(results.get("Likelihood") + "\t" + Arrays.toString(bestParameters));
        return results;
    }
}
public class FieldTextBuilder {
    /**
     * Appends the specified fieldtext onto the builder using the WHEN operator.
     * A simplification is made in the case where the passed {@code fieldText}
     * is equal to {@code this}:
     * <pre>(A WHEN A) {@literal =>} A</pre>
     *
     * @param fieldText a fieldtext expression or specifier
     * @return {@code this}
     * @throws IllegalStateException if this builder is empty
     */
    @Override
    public FieldTextBuilder WHEN(final FieldText fieldText) {
        Validate.notNull(fieldText, "FieldText should not be null");
        Validate.isTrue(fieldText.size() >= 1, "FieldText must have a size greater or equal to 1");
        // WHEN needs a non-empty left-hand side (this builder).
        if (size() < 1) {
            throw new IllegalStateException("Size must be greater than or equal to 1");
        }
        // Here we assume that A WHEN A => A — but is there an edge case where this doesn't work?
        if (fieldText == this || toString().equals(fieldText.toString())) {
            return this;
        }
        // MATCHNOTHING on either side makes the whole expression match nothing.
        if (MATCHNOTHING.equals(this) || MATCHNOTHING.equals(fieldText)) {
            return setFieldText(MATCHNOTHING);
        }
        return binaryOperation("WHEN", fieldText);
    }
}
public class RestApiClient { /** * Unlock user .
* @ param username
* the username
* @ return the response */
public Response unlockUser ( String username ) { } } | return restClient . delete ( "lockouts/" + username , new HashMap < String , String > ( ) ) ; |
public class BlobOutputStream { /** * Flushes this output stream and forces any buffered output bytes to be written out . The general
* contract of < code > flush < / code > is that calling it is an indication that , if any bytes
* previously written have been buffered by the implementation of the output stream , such bytes
* should immediately be written to their intended destination .
* @ throws IOException if an I / O error occurs . */
public void flush ( ) throws IOException { } } | checkClosed ( ) ; try { if ( bpos > 0 ) { lo . write ( buf , 0 , bpos ) ; } bpos = 0 ; } catch ( SQLException se ) { throw new IOException ( se . toString ( ) ) ; } |
public class SimpleFileDownloader { /** * Downloads file from HTTP or FTP .
* @ param fileUrl source file
* @ return path of downloaded file
* @ throws IOException if IO problems
* @ throws PluginException if validation fails or any other problems */
protected Path downloadFileHttp ( URL fileUrl ) throws IOException , PluginException { } } | Path destination = Files . createTempDirectory ( "pf4j-update-downloader" ) ; destination . toFile ( ) . deleteOnExit ( ) ; String path = fileUrl . getPath ( ) ; String fileName = path . substring ( path . lastIndexOf ( '/' ) + 1 ) ; Path file = destination . resolve ( fileName ) ; // set up the URL connection
URLConnection connection = fileUrl . openConnection ( ) ; // connect to the remote site ( may takes some time )
connection . connect ( ) ; // check for http authorization
HttpURLConnection httpConnection = ( HttpURLConnection ) connection ; if ( httpConnection . getResponseCode ( ) == HttpURLConnection . HTTP_UNAUTHORIZED ) { throw new ConnectException ( "HTTP Authorization failure" ) ; } // try to get the server - specified last - modified date of this artifact
long lastModified = httpConnection . getHeaderFieldDate ( "Last-Modified" , System . currentTimeMillis ( ) ) ; // try to get the input stream ( three times )
InputStream is = null ; for ( int i = 0 ; i < 3 ; i ++ ) { try { is = connection . getInputStream ( ) ; break ; } catch ( IOException e ) { log . error ( e . getMessage ( ) , e ) ; } } if ( is == null ) { throw new ConnectException ( "Can't get '" + fileUrl + " to '" + file + "'" ) ; } // reade from remote resource and write to the local file
FileOutputStream fos = new FileOutputStream ( file . toFile ( ) ) ; byte [ ] buffer = new byte [ 1024 ] ; int length ; while ( ( length = is . read ( buffer ) ) >= 0 ) { fos . write ( buffer , 0 , length ) ; } fos . close ( ) ; is . close ( ) ; log . debug ( "Set last modified of '{}' to '{}'" , file , lastModified ) ; Files . setLastModifiedTime ( file , FileTime . fromMillis ( lastModified ) ) ; return file ; |
public class BusinessExceptionMappingStrategy { /** * This method is specifically designed for use during EJSContainer
* postInvoke processing . It maps the internal exception indicating
* a rollback has occurred to the appropriate one for the interface
* ( i . e . local , remote , business , etc ) . < p > */
@ Override public Exception mapCSITransactionRolledBackException ( EJSDeployedSupport s , CSITransactionRolledbackException ex ) throws com . ibm . websphere . csi . CSIException { } } | Throwable cause = null ; Exception causeEx = null ; // If the invoked method threw a System exception , or an Application
// exception that was marked for rollback , or the application called
// setRollBackOnly . . . then use the exception thrown by the method
// as the cause of the rollback exception .
if ( s . exType == ExceptionType . UNCHECKED_EXCEPTION ) { cause = s . ivException ; } // Otherwise , use the cause of the CSIException as the cause ,
// unless it has no cause . . . then use the app exception .
else { cause = ExceptionUtil . findCause ( ex ) ; if ( cause == null ) cause = s . ivException ; } // Because this will be mapped to an EJBException , and Throwable
// is not supported on the constructor . . . insure the cause is
// either an Exception , or wrap it in an Exception .
if ( cause != null ) { if ( cause instanceof Exception ) { causeEx = ( Exception ) cause ; } else { causeEx = new Exception ( "See nested Throwable" , cause ) ; } } // Now , map this CSIException . . . this will take care of getting
// the stack set appropriately and will set the ' cause ' on
// Throwable , so getCause works .
Exception mappedEx = mapCSIException ( ex , causeEx , cause ) ; if ( TraceComponent . isAnyTracingEnabled ( ) && tc . isDebugEnabled ( ) ) Tr . debug ( tc , "mapped exception = " + mappedEx ) ; return mappedEx ; |
public class HybridRunbookWorkerGroupsInner { /** * Update a hybrid runbook worker group .
* @ param resourceGroupName Name of an Azure Resource group .
* @ param automationAccountName The name of the automation account .
* @ param hybridRunbookWorkerGroupName The hybrid runbook worker group name
* @ param credential Sets the credential of a worker group .
* @ throws IllegalArgumentException thrown if parameters fail the validation
* @ return the observable to the HybridRunbookWorkerGroupInner object */
public Observable < ServiceResponse < HybridRunbookWorkerGroupInner > > updateWithServiceResponseAsync ( String resourceGroupName , String automationAccountName , String hybridRunbookWorkerGroupName , RunAsCredentialAssociationProperty credential ) { } } | if ( resourceGroupName == null ) { throw new IllegalArgumentException ( "Parameter resourceGroupName is required and cannot be null." ) ; } if ( automationAccountName == null ) { throw new IllegalArgumentException ( "Parameter automationAccountName is required and cannot be null." ) ; } if ( hybridRunbookWorkerGroupName == null ) { throw new IllegalArgumentException ( "Parameter hybridRunbookWorkerGroupName is required and cannot be null." ) ; } if ( this . client . subscriptionId ( ) == null ) { throw new IllegalArgumentException ( "Parameter this.client.subscriptionId() is required and cannot be null." ) ; } Validator . validate ( credential ) ; final String apiVersion = "2015-10-31" ; HybridRunbookWorkerGroupUpdateParameters parameters = new HybridRunbookWorkerGroupUpdateParameters ( ) ; parameters . withCredential ( credential ) ; return service . update ( resourceGroupName , automationAccountName , hybridRunbookWorkerGroupName , this . client . subscriptionId ( ) , apiVersion , this . client . acceptLanguage ( ) , parameters , this . client . userAgent ( ) ) . flatMap ( new Func1 < Response < ResponseBody > , Observable < ServiceResponse < HybridRunbookWorkerGroupInner > > > ( ) { @ Override public Observable < ServiceResponse < HybridRunbookWorkerGroupInner > > call ( Response < ResponseBody > response ) { try { ServiceResponse < HybridRunbookWorkerGroupInner > clientResponse = updateDelegate ( response ) ; return Observable . just ( clientResponse ) ; } catch ( Throwable t ) { return Observable . error ( t ) ; } } } ) ; |
public class PdfContentByte { /** * Restores the graphic state . < CODE > saveState < / CODE > and
* < CODE > restoreState < / CODE > must be balanced . */
public void restoreState ( ) { } } | content . append ( "Q" ) . append_i ( separator ) ; int idx = stateList . size ( ) - 1 ; if ( idx < 0 ) throw new IllegalPdfSyntaxException ( "Unbalanced save/restore state operators." ) ; state = ( GraphicState ) stateList . get ( idx ) ; stateList . remove ( idx ) ; |
public class JFunk { /** * Starts jFunk .
* < pre >
* - threadcount = & lt ; count & gt ; Optional Number of threads to be used . Allows for parallel
* execution of test scripts .
* - parallel Optional Allows a single script to be executed in parallel
* depending on the number of threads specified . The
* argument is ignored if multiple scripts are specified .
* & lt ; script parameters & gt Optional Similar to Java system properties they can be provided
* as key - value - pairs preceded by - S , e . g . - Skey = value .
* These parameters are then available in the script as
* Groovy variables .
* & lt ; script ( s ) & gt ; Required At least one test script must be specified .
* Example :
* java - cp & lt ; jFunkClasspath & gt ; com . mgmtp . jfunk . core . JFunk - Skey = value - threadcount = 4 - parallel mytest . script
* < / pre >
* @ param args
* The program arguments . */
public static void main ( final String [ ] args ) { } } | SLF4JBridgeHandler . install ( ) ; boolean exitWithError = true ; StopWatch stopWatch = new StopWatch ( ) ; try { RESULT_LOG . info ( "jFunk started" ) ; stopWatch . start ( ) ; int threadCount = 1 ; boolean parallel = false ; Properties scriptProperties = new Properties ( ) ; List < File > scripts = Lists . newArrayList ( ) ; for ( String arg : args ) { if ( arg . startsWith ( "-threadcount" ) ) { String [ ] split = arg . split ( "=" ) ; Preconditions . checkArgument ( split . length == 2 , "The number of threads must be specified as follows: -threadcount=<value>" ) ; threadCount = Integer . parseInt ( split [ 1 ] ) ; RESULT_LOG . info ( "Using " + threadCount + ( threadCount == 1 ? " thread" : " threads" ) ) ; } else if ( arg . startsWith ( "-S" ) ) { arg = arg . substring ( 2 ) ; String [ ] split = arg . split ( "=" ) ; Preconditions . checkArgument ( split . length == 2 , "Script parameters must be given in the form -S<name>=<value>" ) ; scriptProperties . setProperty ( split [ 0 ] , normalizeScriptParameterValue ( split [ 1 ] ) ) ; RESULT_LOG . info ( "Using script parameter " + split [ 0 ] + " with value " + split [ 1 ] ) ; } else if ( arg . equals ( "-parallel" ) ) { parallel = true ; RESULT_LOG . info ( "Using parallel mode" ) ; } else { scripts . add ( new File ( arg ) ) ; } } if ( scripts . isEmpty ( ) ) { scripts . addAll ( requestScriptsViaGui ( ) ) ; if ( scripts . isEmpty ( ) ) { RESULT_LOG . info ( "Execution finished (took " + stopWatch + " H:mm:ss.SSS)" ) ; System . exit ( 0 ) ; } } String propsFileName = System . getProperty ( "jfunk.props.file" , "jfunk.properties" ) ; Module module = ModulesLoader . loadModulesFromProperties ( new JFunkDefaultModule ( ) , propsFileName ) ; Injector injector = Guice . createInjector ( module ) ; JFunkFactory factory = injector . getInstance ( JFunkFactory . class ) ; JFunkBase jFunk = factory . 
create ( threadCount , parallel , scripts , scriptProperties ) ; jFunk . execute ( ) ; exitWithError = false ; } catch ( JFunkExecutionException ex ) { // no logging necessary
} catch ( Exception ex ) { Logger . getLogger ( JFunk . class ) . error ( "jFunk terminated unexpectedly." , ex ) ; } finally { stopWatch . stop ( ) ; RESULT_LOG . info ( "Execution finished (took " + stopWatch + " H:mm:ss.SSS)" ) ; } System . exit ( exitWithError ? - 1 : 0 ) ; |
public class DefaultInternalConfiguration {
    /**
     * Finds the start of a variable name in the given string. Variables use the
     * "${<i>variableName</i>}" notation.
     *
     * @param aKey the key to search.
     * @return the index of the start of a variable name in the given string, or -1 if not found.
     */
    private int findStartVariable(final String aKey) {
        if (aKey == null) {
            return -1;
        }
        // A variable reference begins with the literal "${". String.indexOf performs
        // exactly the scan the original hand-rolled '$'-then-'{' loop implemented,
        // including the -1 result when no such pair exists.
        return aKey.indexOf("${");
    }
}
public class Lazy { /** * Java object serialization */
private void writeObject ( final ObjectOutputStream out ) throws IOException { } } | final Object value = get ( ) ; out . defaultWriteObject ( ) ; out . writeObject ( value ) ; |
public class OverlordAccessor { /** * Poll for task ' s status for tasks fired with 0 wait time .
* @ param taskId
* @ param reqHeaders
* @ return */
public TaskStatus pollTaskStatus ( String taskId , Map < String , String > reqHeaders ) { } } | CloseableHttpResponse resp = null ; String url = format ( "%s/%s/status" , format ( overlordUrl , overlordHost , overlordPort ) , taskId ) ; try { resp = get ( url , reqHeaders ) ; // TODO : Check for nulls in the following .
JSONObject respJson = new JSONObject ( IOUtils . toString ( resp . getEntity ( ) . getContent ( ) ) ) ; JSONObject status = respJson . optJSONObject ( "status" ) ; if ( status != null && null != status . getString ( "status" ) ) { switch ( status . getString ( "status" ) ) { case "SUCCESS" : return TaskStatus . SUCCESS ; case "RUNNING" : return TaskStatus . RUNNING ; default : return TaskStatus . FAILED ; } } } catch ( IOException ex ) { log . error ( "Error polling for task status {}" , ExceptionUtils . getStackTrace ( ex ) ) ; } finally { returnClient ( resp ) ; } // Happens when result looks like { " task " : " null " } which has no status ( because such a task itself does not exist )
return TaskStatus . UNKNOWN ; |
public class MethodsBinding { /** * / * @ Override */
public S unmarshal ( T string ) { } } | try { return getBoundClass ( ) . cast ( unmarshalHandle . invoke ( string ) ) ; } catch ( Throwable ex ) { if ( ex . getCause ( ) instanceof RuntimeException ) { throw ( RuntimeException ) ex . getCause ( ) ; } throw new BindingException ( ex . getMessage ( ) , ex . getCause ( ) ) ; } |
public class SubscriptionSchedule {
  /**
   * Retrieves the details of an existing subscription schedule. You only need to supply the unique
   * subscription schedule identifier that was returned upon subscription schedule creation.
   *
   * @param schedule the identifier of the subscription schedule to retrieve.
   * @return the retrieved subscription schedule.
   * @throws StripeException if the API request fails.
   */
  public static SubscriptionSchedule retrieve(String schedule) throws StripeException {
    // Delegates to the three-argument overload with no extra parameters and default request options.
    return retrieve(schedule, (Map<String, Object>) null, (RequestOptions) null);
  }
}
public class NikeFS2SwapFileManager {
    /**
     * Deletes the given directory recursively, i.e., bottom-up.
     *
     * @param directory Directory to be deleted.
     * @return The number of files deleted.
     */
    private static synchronized int deleteRecursively(File directory) {
        int deleted = 0;
        if (directory.isDirectory()) {
            File[] contents = directory.listFiles();
            // FIX: listFiles() returns null on an I/O error or if the directory
            // disappeared concurrently; the original dereferenced it unconditionally
            // and would throw NullPointerException.
            if (contents != null) {
                for (File file : contents) {
                    deleted += deleteRecursively(file);
                }
            }
        } else {
            // Plain file (or dangling entry): counted toward the total.
            // NOTE(review): counted even if the delete() below fails — same as the
            // original behavior; directories are never counted.
            deleted++;
        }
        directory.delete();
        return deleted;
    }
}
public class DigestUtils { /** * Hashes the source with SHA1 and returns the resulting hash as an hexadecimal string .
* @ param source the text to hash .
* @ return the SHA1 hash of the source , in hexadecimal form . */
public static String digestSha1Hex ( String source ) { } } | String sha1 = "" ; try { MessageDigest crypt = MessageDigest . getInstance ( "SHA-1" ) ; crypt . reset ( ) ; crypt . update ( source . getBytes ( "UTF-8" ) ) ; sha1 = byteToHex ( crypt . digest ( ) ) ; } catch ( NoSuchAlgorithmException e ) { e . printStackTrace ( ) ; } catch ( UnsupportedEncodingException e ) { e . printStackTrace ( ) ; } return sha1 ; |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.