signature
stringlengths
43
39.1k
implementation
stringlengths
0
450k
public class AbstractEnvironment { /** * Construct the path to the support servlet . Web components that need to construct a URL to target this servlet * will use this method . * @ return the path to the AJAX servlet . */ @ Override public String getWServletPath ( ) { } }
final String configValue = ConfigurationProperties . getServletSupportPath ( ) ; return configValue == null ? getPostPath ( ) : getServletPath ( configValue ) ;
public class BoundingBox { /** * Returns if an area built from the latLongs intersects with a bias towards * returning true . * The method returns fast if any of the points lie within the bbox . If none of the points * lie inside the box , it constructs the outer bbox for all the points and tests for intersection * ( so it is possible that the area defined by the points does not actually intersect ) * @ param latLongs the points that define an area * @ return false if there is no intersection , true if there could be an intersection */ public boolean intersectsArea ( LatLong [ ] [ ] latLongs ) { } }
if ( latLongs . length == 0 || latLongs [ 0 ] . length == 0 ) { return false ; } for ( LatLong [ ] outer : latLongs ) { for ( LatLong latLong : outer ) { if ( this . contains ( latLong ) ) { // if any of the points is inside the bbox return early return true ; } } } // no fast solution , so accumulate boundary points double tmpMinLat = latLongs [ 0 ] [ 0 ] . latitude ; double tmpMinLon = latLongs [ 0 ] [ 0 ] . longitude ; double tmpMaxLat = latLongs [ 0 ] [ 0 ] . latitude ; double tmpMaxLon = latLongs [ 0 ] [ 0 ] . longitude ; for ( LatLong [ ] outer : latLongs ) { for ( LatLong latLong : outer ) { tmpMinLat = Math . min ( tmpMinLat , latLong . latitude ) ; tmpMaxLat = Math . max ( tmpMaxLat , latLong . latitude ) ; tmpMinLon = Math . min ( tmpMinLon , latLong . longitude ) ; tmpMaxLon = Math . max ( tmpMaxLon , latLong . longitude ) ; } } return this . intersects ( new BoundingBox ( tmpMinLat , tmpMinLon , tmpMaxLat , tmpMaxLon ) ) ;
public class ListValue { /** * ( non - Javadoc ) * @ see java . util . List # subList ( int , int ) */ @ Override public List < V > subList ( final int fromIndex , final int toIndex ) { } }
return this . list . subList ( fromIndex , toIndex ) ;
public class ST_AddPoint { /** * Returns a new geometry based on an existing one , with a specific point as * a new vertex . * @ param geometry * @ param point * @ param tolerance * @ return Null if the vertex cannot be inserted * @ throws SQLException If the vertex can be inserted but it makes the * geometry to be in an invalid shape */ public static Geometry addPoint ( Geometry geometry , Point point , double tolerance ) throws SQLException { } }
if ( geometry == null || point == null ) { return null ; } if ( geometry instanceof MultiPoint ) { return insertVertexInMultipoint ( geometry , point ) ; } else if ( geometry instanceof LineString ) { return insertVertexInLineString ( ( LineString ) geometry , point , tolerance ) ; } else if ( geometry instanceof MultiLineString ) { LineString [ ] linestrings = new LineString [ geometry . getNumGeometries ( ) ] ; boolean any = false ; for ( int i = 0 ; i < geometry . getNumGeometries ( ) ; i ++ ) { LineString line = ( LineString ) geometry . getGeometryN ( i ) ; LineString inserted = insertVertexInLineString ( line , point , tolerance ) ; if ( inserted != null ) { linestrings [ i ] = inserted ; any = true ; } else { linestrings [ i ] = line ; } } if ( any ) { return FACTORY . createMultiLineString ( linestrings ) ; } else { return null ; } } else if ( geometry instanceof Polygon ) { return insertVertexInPolygon ( ( Polygon ) geometry , point , tolerance ) ; } else if ( geometry instanceof MultiPolygon ) { Polygon [ ] polygons = new Polygon [ geometry . getNumGeometries ( ) ] ; boolean any = false ; for ( int i = 0 ; i < geometry . getNumGeometries ( ) ; i ++ ) { Polygon polygon = ( Polygon ) geometry . getGeometryN ( i ) ; Polygon inserted = insertVertexInPolygon ( polygon , point , tolerance ) ; if ( inserted != null ) { any = true ; polygons [ i ] = inserted ; } else { polygons [ i ] = polygon ; } } if ( any ) { return FACTORY . createMultiPolygon ( polygons ) ; } else { return null ; } } else if ( geometry instanceof Point ) { return null ; } throw new SQLException ( "Unknown geometry type" + " : " + geometry . getGeometryType ( ) ) ;
public class ClassUtil { /** * 指定类是否为Public * @ param clazz 类 * @ return 是否为public */ public static boolean isPublic ( Class < ? > clazz ) { } }
if ( null == clazz ) { throw new NullPointerException ( "Class to provided is null." ) ; } return Modifier . isPublic ( clazz . getModifiers ( ) ) ;
public class HttpServiceSchedulerClient { /** * Send payload to target HTTP connection to request a service * @ param data the byte [ ] to send * @ return true if got OK response successfully */ protected boolean requestSchedulerService ( Command command , byte [ ] data ) { } }
String endpoint = getCommandEndpoint ( schedulerHttpEndpoint , command ) ; final HttpURLConnection connection = NetworkUtils . getHttpConnection ( endpoint ) ; if ( connection == null ) { LOG . severe ( "Scheduler not found." ) ; return false ; } // now , we have a valid connection try { // send the actual http request if ( ! NetworkUtils . sendHttpPostRequest ( connection , NetworkUtils . URL_ENCODE_TYPE , data ) ) { LOG . log ( Level . SEVERE , "Failed to send http request to scheduler" ) ; return false ; } // receive the response for manage topology Common . StatusCode statusCode ; LOG . fine ( "Receiving response from scheduler..." ) ; try { statusCode = Scheduler . SchedulerResponse . newBuilder ( ) . mergeFrom ( NetworkUtils . readHttpResponse ( connection ) ) . build ( ) . getStatus ( ) . getStatus ( ) ; } catch ( InvalidProtocolBufferException e ) { LOG . log ( Level . SEVERE , "Failed to parse response" , e ) ; return false ; } if ( ! statusCode . equals ( Common . StatusCode . OK ) ) { LOG . severe ( "Received not OK response from scheduler" ) ; return false ; } } finally { connection . disconnect ( ) ; } return true ;
public class ComputeNodeOperations { /** * Disables task scheduling on the specified compute node . * @ param poolId The ID of the pool . * @ param nodeId The ID of the compute node . * @ param nodeDisableSchedulingOption Specifies what to do with currently running tasks . * @ throws BatchErrorException Exception thrown when an error response is received from the Batch service . * @ throws IOException Exception thrown when there is an error in serialization / deserialization of data sent to / received from the Batch service . */ public void disableComputeNodeScheduling ( String poolId , String nodeId , DisableComputeNodeSchedulingOption nodeDisableSchedulingOption ) throws BatchErrorException , IOException { } }
disableComputeNodeScheduling ( poolId , nodeId , nodeDisableSchedulingOption , null ) ;
public class ApiOvhPrice { /** * Get the price of a JOB consumption for 1 hour * REST : GET / price / hpcspot / consumption / job / { reference } * @ param reference [ required ] The reference of the JOB consumption */ public OvhPrice hpcspot_consumption_job_reference_GET ( net . minidev . ovh . api . price . hpcspot . consumption . OvhJobEnum reference ) throws IOException { } }
String qPath = "/price/hpcspot/consumption/job/{reference}" ; StringBuilder sb = path ( qPath , reference ) ; String resp = exec ( qPath , "GET" , sb . toString ( ) , null ) ; return convertTo ( resp , OvhPrice . class ) ;
public class SoundGroup { /** * Called by the manager when the base gain has changed . */ protected void baseGainChanged ( ) { } }
// notify any sound currently holding a source for ( int ii = 0 , nn = _sources . size ( ) ; ii < nn ; ii ++ ) { Sound holder = _sources . get ( ii ) . holder ; if ( holder != null ) { holder . updateSourceGain ( ) ; } }
public class TeaCompiler { /** * Overridden to make public */ public Parser createParser ( org . teatrove . tea . compiler . Scanner scanner , CompilationUnit unit ) throws IOException { } }
Parser parser = super . createParser ( scanner , unit ) ; CompileListener parserListener = getParserListener ( ) ; if ( parserListener != null ) { parser . addCompileListener ( parserListener ) ; } return parser ;
public class WorkbookCreationHelper { /** * Added a new cell as date / time . * Important : don ' t forget to call { @ link # addCellStyle ( ExcelStyle ) } with * something like < code > new ExcelStyle ( ) . setDataFormat ( " dd . mm . yyyy " ) ; < / code > * after a date / time cell ! * Important : Excel cannot correctly handle dates / times before * { @ link CExcel # EXCEL _ MINIMUM _ DATE 1900-01-01} * @ param aValue * The value to be set . * @ return A new cell in the current row of the current sheet with the passed * value */ @ Nonnull public Cell addCell ( @ Nullable final LocalDate aValue ) { } }
if ( aValue == null ) return addCell ( ) ; return addCell ( PDTFactory . createZonedDateTime ( aValue ) ) ;
public class AbstractGs2Client { /** * DELETEリクエストを生成 * @ param url アクセス先URL * @ param credential 認証情報 * @ param service アクセス先サービス * @ param module アクセス先モジュール * @ param function アクセス先ファンクション * @ return リクエストオブジェクト */ protected HttpDelete createHttpDelete ( String url , IGs2Credential credential , String service , String module , String function ) { } }
Long timestamp = System . currentTimeMillis ( ) / 1000 ; url = StringUtils . replace ( url , "{service}" , service ) ; url = StringUtils . replace ( url , "{region}" , region . getName ( ) ) ; HttpDelete delete = new HttpDelete ( url ) ; delete . setHeader ( "Content-Type" , "application/json" ) ; credential . authorized ( delete , service , module , function , timestamp ) ; return delete ;
public class ImageUtil { /** * Creates and returns a new image consisting of the supplied image traced with the given * color , thickness and alpha transparency . */ public static BufferedImage createTracedImage ( BufferedImage src , BufferedImage dest , Color tcolor , int thickness , float startAlpha , float endAlpha ) { } }
// prepare various bits of working data int wid = src . getWidth ( ) , hei = src . getHeight ( ) ; int spixel = ( tcolor . getRGB ( ) & RGB_MASK ) ; int salpha = ( int ) ( startAlpha * 255 ) ; int tpixel = ( spixel | ( salpha << 24 ) ) ; boolean [ ] traced = new boolean [ wid * hei ] ; int stepAlpha = ( thickness <= 1 ) ? 0 : ( int ) ( ( ( startAlpha - endAlpha ) * 255 ) / ( thickness - 1 ) ) ; // TODO : this could be made more efficient , e . g . , if we made four passes through the image // in a vertical scan , horizontal scan , and opposing diagonal scans , making sure each // non - transparent pixel found during each scan is traced on both sides of the respective // scan direction . For now , we just naively check all eight pixels surrounding each pixel // in the image and fill the center pixel with the tracing color if it ' s transparent but // has a non - transparent pixel around it . for ( int tt = 0 ; tt < thickness ; tt ++ ) { if ( tt > 0 ) { // clear out the array of pixels traced this go - around Arrays . fill ( traced , false ) ; // use the destination image as our new source src = dest ; // decrement the trace pixel alpha - level salpha -= Math . max ( 0 , stepAlpha ) ; tpixel = ( spixel | ( salpha << 24 ) ) ; } for ( int yy = 0 ; yy < hei ; yy ++ ) { for ( int xx = 0 ; xx < wid ; xx ++ ) { // get the pixel we ' re checking int argb = src . getRGB ( xx , yy ) ; if ( ( argb & TRANS_MASK ) != 0 ) { // copy any pixel that isn ' t transparent dest . setRGB ( xx , yy , argb ) ; } else if ( bordersNonTransparentPixel ( src , wid , hei , traced , xx , yy ) ) { dest . setRGB ( xx , yy , tpixel ) ; // note that we traced this pixel this pass so // that it doesn ' t impact other - pixel borderedness traced [ ( yy * wid ) + xx ] = true ; } } } } return dest ;
public class OriginTrackedValue { /** * Create an { @ link OriginTrackedValue } containing the specified { @ code value } and * { @ code origin } . If the source value implements { @ link CharSequence } then so will * the resulting { @ link OriginTrackedValue } . * @ param value the source value * @ param origin the origin * @ return an { @ link OriginTrackedValue } or { @ code null } if the source value was * { @ code null } . */ public static OriginTrackedValue of ( Object value , Origin origin ) { } }
if ( value == null ) { return null ; } if ( value instanceof CharSequence ) { return new OriginTrackedCharSequence ( ( CharSequence ) value , origin ) ; } return new OriginTrackedValue ( value , origin ) ;
public class SessionManagementBeanImpl { /** * Notify the management listeners on a filterWrite . * NOTE : this starts on the IO thread , but runs a task OFF the thread . */ @ Override public void doSessionClosedListeners ( ) { } }
runManagementTask ( new Runnable ( ) { @ Override public void run ( ) { try { // The particular management listeners change on strategy , so get them here . for ( final SessionManagementListener listener : getManagementListeners ( ) ) { listener . doSessionClosed ( SessionManagementBeanImpl . this ) ; } // XXX should there be a markChanged ( ) here because the session status is now closed ? // Or is that covered by the fact we generate a session - closed message ? } catch ( Exception ex ) { logger . warn ( "Error during doSessionClosed session listener notifications:" , ex ) ; } } } ) ;
public class WhiteboxImpl { /** * Get the first parent constructor defined in a super class of * { @ code klass } . * @ param klass The class where the constructor is located . { @ code null } * @ return A . */ public static Constructor < ? > getFirstParentConstructor ( Class < ? > klass ) { } }
try { return getOriginalUnmockedType ( klass ) . getSuperclass ( ) . getDeclaredConstructors ( ) [ 0 ] ; } catch ( Exception e ) { throw new ConstructorNotFoundException ( "Failed to lookup constructor." , e ) ; }
public class Spies { /** * Proxies a binary consumer spying for first parameter . * @ param < T1 > the consumer first parameter type * @ param < T2 > the consumer second parameter type * @ param consumer the consumer that will be spied * @ param param1 a box that will be containing the first spied parameter * @ return the proxied consumer */ public static < T1 , T2 > BiConsumer < T1 , T2 > spy1st ( BiConsumer < T1 , T2 > consumer , Box < T1 > param1 ) { } }
return spy ( consumer , param1 , Box . < T2 > empty ( ) ) ;
public class CmsSerialDateValue { /** * Read an optional month value form a JSON value . * @ param val the JSON value that should represent the month . * @ return the month from the JSON or null if reading the month fails . */ private Month readOptionalMonth ( JSONValue val ) { } }
String str = readOptionalString ( val ) ; if ( null != str ) { try { return Month . valueOf ( str ) ; } catch ( @ SuppressWarnings ( "unused" ) IllegalArgumentException e ) { // Do nothing - return the default value } } return null ;
public class FatCatAligner { /** * runs rigid chaining process */ private static Group [ ] rChainAfp ( FatCatParameters params , AFPChain afpChain , Atom [ ] ca1 , Atom [ ] ca2 ) throws StructureException { } }
params . setMaxTra ( 0 ) ; afpChain . setMaxTra ( 0 ) ; return chainAfp ( params , afpChain , ca1 , ca2 ) ;
public class AbstractBaseMessageInterpolator { /** * Visible for testing */ static Function < String , String > createAnnotationReplacer ( final Map < String , Object > map ) { } }
return from -> { final Object object = map . get ( from ) ; return object == null ? null : object . toString ( ) ; } ;
public class Strings { /** * Inject system properties into text with variables . Given text uses standard variable notation * < code > $ { variable - name } < / code > , as known from , for example , Ant or Log4j configuration . This method replace found * variable with system property using < code > variable - name < / code > as property key . It is a logic flaw if system * property is missing . * For example , injecting system properties into < code > $ { log } / app . log < / code > will return < code > / var / log / app . log < / code > * provided there is a system property named < code > log < / code > with value < code > / var / log < / code > . Note that there is no * limit on the number of system properties and / or variables to replace . * @ param string text with variables . * @ return new string with variables replaces . * @ throws BugError if system property is missing . */ public static String injectProperties ( final String string ) throws BugError { } }
return Strings . replaceAll ( string , VARIABLE_PATTERN , new Handler < String , String > ( ) { @ Override public String handle ( String variableName ) { String property = System . getProperty ( variableName ) ; if ( property == null ) { throw new BugError ( "Missing system property |%s|. String |%s| variable injection aborted." , variableName , string ) ; } return property ; } } ) ;
public class DeliveryOptionsMarshaller { /** * Marshall the given parameter object . */ public void marshall ( DeliveryOptions deliveryOptions , ProtocolMarshaller protocolMarshaller ) { } }
if ( deliveryOptions == null ) { throw new SdkClientException ( "Invalid argument passed to marshall(...)" ) ; } try { protocolMarshaller . marshall ( deliveryOptions . getSendingPoolName ( ) , SENDINGPOOLNAME_BINDING ) ; } catch ( Exception e ) { throw new SdkClientException ( "Unable to marshall request to JSON: " + e . getMessage ( ) , e ) ; }
public class StatementManager { /** * Registers a compiled statement to be managed . * The only caller should be a Session that is attempting to prepare * a statement for the first time or process a statement that has been * invalidated due to DDL changes . * @ param csid existing id or negative if the statement is not yet managed * @ param cs The CompiledStatement to add * @ return The compiled statement id assigned to the CompiledStatement * object */ private long registerStatement ( long csid , Statement cs ) { } }
if ( csid < 0 ) { csid = nextID ( ) ; int schemaid = cs . getSchemaName ( ) . hashCode ( ) ; LongValueHashMap sqlMap = ( LongValueHashMap ) schemaMap . get ( schemaid ) ; if ( sqlMap == null ) { sqlMap = new LongValueHashMap ( ) ; schemaMap . put ( schemaid , sqlMap ) ; } sqlMap . put ( cs . getSQL ( ) , csid ) ; sqlLookup . put ( csid , cs . getSQL ( ) ) ; } cs . setID ( csid ) ; csidMap . put ( csid , cs ) ; return csid ;
public class CountingLruMap { /** * Gets the all matching elements . */ public synchronized ArrayList < LinkedHashMap . Entry < K , V > > getMatchingEntries ( @ Nullable Predicate < K > predicate ) { } }
ArrayList < LinkedHashMap . Entry < K , V > > matchingEntries = new ArrayList < > ( mMap . entrySet ( ) . size ( ) ) ; for ( LinkedHashMap . Entry < K , V > entry : mMap . entrySet ( ) ) { if ( predicate == null || predicate . apply ( entry . getKey ( ) ) ) { matchingEntries . add ( entry ) ; } } return matchingEntries ;
public class EnumConstantWriterImpl { /** * { @ inheritDoc } */ @ Override protected void addSummaryLink ( LinkInfoImpl . Kind context , TypeElement typeElement , Element member , Content tdSummary ) { } }
Content memberLink = HtmlTree . SPAN ( HtmlStyle . memberNameLink , writer . getDocLink ( context , member , name ( member ) , false ) ) ; Content code = HtmlTree . CODE ( memberLink ) ; tdSummary . addContent ( code ) ;
public class PaperclipRepository { /** * region > findByAttachedTo ( programmatic ) */ @ Programmatic public List < Paperclip > findByAttachedTo ( final Object attachedTo ) { } }
if ( attachedTo == null ) { return null ; } final Bookmark bookmark = bookmarkService . bookmarkFor ( attachedTo ) ; if ( bookmark == null ) { return null ; } final String attachedToStr = bookmark . toString ( ) ; return repositoryService . allMatches ( new QueryDefault < > ( Paperclip . class , "findByAttachedTo" , "attachedToStr" , attachedToStr ) ) ;
public class IntervalHistogram { /** * Clones this histogram and zeroizes out hits afterwards if the ' reset ' is * true . * @ param reset * zero out hits * @ return clone of this histogram ' s state */ public IntervalHistogram snapshot ( boolean reset ) { } }
if ( reset ) { return new IntervalHistogram ( bins , getAndResetHits ( ) ) ; } return new IntervalHistogram ( bins , getHits ( ) ) ;
public class DefaultResolver { /** * Detects conflict but does not resolve it . * @ param siblings the list of siblings returned from Riak * @ return null or the single value in the collection * @ throws UnresolvedConflictException if { @ code siblings } has > 1 entry . */ @ Override public T resolve ( List < T > siblings ) throws UnresolvedConflictException { } }
if ( siblings . size ( ) > 1 ) { throw new UnresolvedConflictException ( "Siblings found" , siblings ) ; } else if ( siblings . size ( ) == 1 ) { return siblings . get ( 0 ) ; } else { return null ; }
public class ModelUtils { /** * Creates a model of the given type and uses the list of OpenEngSBModelEntries as initialization data . */ public static < T > T createModel ( Class < T > model , List < OpenEngSBModelEntry > entries ) { } }
if ( ! ModelWrapper . isModel ( model ) ) { throw new IllegalArgumentException ( "The given class is no model" ) ; } try { T instance = model . newInstance ( ) ; for ( OpenEngSBModelEntry entry : entries ) { if ( tryToSetValueThroughField ( entry , instance ) ) { continue ; } if ( tryToSetValueThroughSetter ( entry , instance ) ) { continue ; } ( ( OpenEngSBModel ) instance ) . addOpenEngSBModelEntry ( entry ) ; } return instance ; } catch ( InstantiationException e ) { LOGGER . error ( "InstantiationException while creating a new model instance." , e ) ; } catch ( IllegalAccessException e ) { LOGGER . error ( "IllegalAccessException while creating a new model instance." , e ) ; } catch ( SecurityException e ) { LOGGER . error ( "SecurityException while creating a new model instance." , e ) ; } return null ;
public class PeriodicReplicationService { /** * Stop replications currently in progress and cancel future scheduled replications . */ public synchronized void stopPeriodicReplication ( ) { } }
if ( isPeriodicReplicationEnabled ( ) ) { setPeriodicReplicationEnabled ( false ) ; AlarmManager alarmManager = ( AlarmManager ) getSystemService ( Context . ALARM_SERVICE ) ; Intent alarmIntent = new Intent ( this , clazz ) ; alarmIntent . setAction ( PeriodicReplicationReceiver . ALARM_ACTION ) ; PendingIntent pendingAlarmIntent = PendingIntent . getBroadcast ( this , 0 , alarmIntent , 0 ) ; alarmManager . cancel ( pendingAlarmIntent ) ; stopReplications ( ) ; } else { Log . i ( TAG , "Attempted to stop an already stopped alarm manager" ) ; }
public class ListAliasesRequestMarshaller { /** * Marshall the given parameter object . */ public void marshall ( ListAliasesRequest listAliasesRequest , ProtocolMarshaller protocolMarshaller ) { } }
if ( listAliasesRequest == null ) { throw new SdkClientException ( "Invalid argument passed to marshall(...)" ) ; } try { protocolMarshaller . marshall ( listAliasesRequest . getOrganizationId ( ) , ORGANIZATIONID_BINDING ) ; protocolMarshaller . marshall ( listAliasesRequest . getEntityId ( ) , ENTITYID_BINDING ) ; protocolMarshaller . marshall ( listAliasesRequest . getNextToken ( ) , NEXTTOKEN_BINDING ) ; protocolMarshaller . marshall ( listAliasesRequest . getMaxResults ( ) , MAXRESULTS_BINDING ) ; } catch ( Exception e ) { throw new SdkClientException ( "Unable to marshall request to JSON: " + e . getMessage ( ) , e ) ; }
public class AsynchConsumer { /** * Calls the registered AsynchConsumerCallback with the given * message enumeration . * @ param msgEnumeration An enumeration of the locked messages for * this AsynchConsumerCallback . */ void processMsgs ( LockedMessageEnumeration msgEnumeration , ConsumerSession consumerSession ) { } }
if ( TraceComponent . isAnyTracingEnabled ( ) && tc . isEntryEnabled ( ) ) SibTr . entry ( tc , "processMsgs" , new Object [ ] { msgEnumeration , consumerSession } ) ; // Remember that a callback is running asynchConsumerRunning = true ; try { if ( TraceComponent . isAnyTracingEnabled ( ) && tc . isDebugEnabled ( ) ) SibTr . debug ( tc , "Entering asynchConsumerCallback.consumeMessages" , new Object [ ] { asynchConsumerCallback , msgEnumeration } ) ; // Call the consumeMessages method on the registered // AsynchConsumerCallback object asynchConsumerCallback . consumeMessages ( msgEnumeration ) ; if ( TraceComponent . isAnyTracingEnabled ( ) && tc . isDebugEnabled ( ) ) SibTr . debug ( tc , "Exiting asynchConsumerCallback.consumeMessages" ) ; } catch ( Throwable e ) { // Catch any exceptions thrown by consumeMessages . FFDCFilter . processException ( e , "com.ibm.ws.sib.processor.impl.AsynchConsumer.processMsgs" , "1:124:1.37" , this ) ; // Notify the consumer that this exception occurred . if ( consumerSession != null ) { try { // Notify the consumer that this exception occurred . notifyExceptionListeners ( e , consumerSession ) ; } catch ( Exception connectionClosed ) { // No FFDC code needed } } // Trace the exception but otherwise swallow it since it was not // a failure in MP code or code upon which the MP is critically // dependent . if ( e instanceof Exception ) SibTr . exception ( tc , ( Exception ) e ) ; if ( TraceComponent . isAnyTracingEnabled ( ) && tc . isDebugEnabled ( ) ) SibTr . debug ( tc , "Exception occurred in consumeMessages " + e ) ; // We ' re not allowed to swallow this exception - let it work its way back to // the threadpool if ( e instanceof ThreadDeath ) { if ( TraceComponent . isAnyTracingEnabled ( ) && tc . isEntryEnabled ( ) ) SibTr . exit ( tc , "processMsgs" , e ) ; throw ( ThreadDeath ) e ; } } finally { asynchConsumerRunning = false ; } if ( TraceComponent . isAnyTracingEnabled ( ) && tc . isEntryEnabled ( ) ) SibTr . 
exit ( tc , "processMsgs" ) ;
public class LockTable { /** * Get the waitlist for this bookmark . */ public SessionInfo popWaitlistSession ( Object bookmark ) { } }
Vector < SessionInfo > vector = m_htWaitLists . get ( bookmark ) ; if ( vector == null ) return null ; SessionInfo sessionInfo = vector . remove ( 0 ) ; if ( vector . size ( ) == 0 ) m_htWaitLists . remove ( bookmark ) ; return sessionInfo ;
public class SpringCamelContextFactory { /** * Create a single { @ link SpringCamelContext } from the given URL * @ throws IllegalStateException if the given URL does not contain a single context definition */ public static SpringCamelContext createSingleCamelContext ( URL contextUrl , ClassLoader classsLoader ) throws Exception { } }
SpringCamelContextBootstrap bootstrap = new SpringCamelContextBootstrap ( contextUrl , classsLoader ) ; List < SpringCamelContext > list = bootstrap . createSpringCamelContexts ( ) ; IllegalStateAssertion . assertEquals ( 1 , list . size ( ) , "Single context expected in: " + contextUrl ) ; return list . get ( 0 ) ;
public class LDADemo { /** * Execute the projection algorithm and return a swing JComponent representing * the clusters . */ public JComponent learn ( ) { } }
double [ ] [ ] data = dataset [ datasetIndex ] . toArray ( new double [ dataset [ datasetIndex ] . size ( ) ] [ ] ) ; String [ ] names = dataset [ datasetIndex ] . toArray ( new String [ dataset [ datasetIndex ] . size ( ) ] ) ; if ( names [ 0 ] == null ) { names = null ; } int [ ] label = dataset [ datasetIndex ] . toArray ( new int [ dataset [ datasetIndex ] . size ( ) ] ) ; int min = Math . min ( label ) ; for ( int i = 0 ; i < label . length ; i ++ ) { label [ i ] -= min ; } long clock = System . currentTimeMillis ( ) ; FLD lda = new FLD ( data , label , Math . unique ( label ) . length > 3 ? 3 : 2 ) ; System . out . format ( "Learn LDA from %d samples in %dms\n" , data . length , System . currentTimeMillis ( ) - clock ) ; double [ ] [ ] y = lda . project ( data ) ; PlotCanvas plot = new PlotCanvas ( Math . colMin ( y ) , Math . colMax ( y ) ) ; if ( names != null ) { plot . points ( y , names ) ; } else if ( dataset [ datasetIndex ] . responseAttribute ( ) != null ) { int [ ] labels = dataset [ datasetIndex ] . toArray ( new int [ dataset [ datasetIndex ] . size ( ) ] ) ; for ( int i = 0 ; i < y . length ; i ++ ) { plot . point ( pointLegend , Palette . COLORS [ labels [ i ] ] , y [ i ] ) ; } } else { plot . points ( y , pointLegend ) ; } plot . setTitle ( "Linear Discriminant Analysis" ) ; return plot ;
public class Helpers { /** * Create a Deliverable inside the database that will track a file created by a JobInstance Must be called from inside a transaction * @ param path * FilePath ( relative to a root directory - cf . Node ) * @ param originalFileName * FileName * @ param fileFamily * File family ( may be null ) . E . g . : " daily report " * @ param jobId * Job Instance ID * @ param cnx * the DbConn to use . */ static int createDeliverable ( String path , String originalFileName , String fileFamily , Integer jobId , DbConn cnx ) { } }
QueryResult qr = cnx . runUpdate ( "deliverable_insert" , fileFamily , path , jobId , originalFileName , UUID . randomUUID ( ) . toString ( ) ) ; return qr . getGeneratedId ( ) ;
public class VersionableWorkspaceDataManager { /** * { @ inheritDoc } */ @ Override public ItemData getItemData ( NodeData parentData , QPathEntry name , ItemType itemType ) throws RepositoryException { } }
return getItemData ( parentData , name , itemType , true ) ;
public class MetricName { /** * Same as { @ link # tagged ( Map ) } , but takes a variadic list * of arguments . * @ see # tagged ( Map ) * @ param pairs An even list of strings acting as key - value pairs . * @ return A newly created metric name with the specified tags associated * with it . */ public MetricName tagged ( String ... pairs ) { } }
if ( pairs == null ) { return this ; } if ( pairs . length % 2 != 0 ) { throw new IllegalArgumentException ( "Argument count must be even" ) ; } final Map < String , String > add = new HashMap < > ( ) ; for ( int i = 0 ; i < pairs . length ; i += 2 ) { add . put ( pairs [ i ] , pairs [ i + 1 ] ) ; } return tagged ( add ) ;
public class ChannelManager { /** * Returns a channel from the ChannelManager ' s cache with the given name If * multiple channels are found , returns the most recently CREATED one . If * two channels with the very same date exist , avoid HUNGUP ones . * @ param name the name of the requested channel . * @ return the ( most recent ) channel if found , in any state , or null if none * found . */ AsteriskChannelImpl getChannelImplByName ( String name ) { } }
Date dateOfCreation = null ; AsteriskChannelImpl channel = null ; if ( name == null ) { return null ; } synchronized ( channels ) { for ( AsteriskChannelImpl tmp : channels . values ( ) ) { if ( name . equals ( tmp . getName ( ) ) ) { // return the most recent channel or when dates are similar , // the active one if ( dateOfCreation == null || tmp . getDateOfCreation ( ) . after ( dateOfCreation ) || ( tmp . getDateOfCreation ( ) . equals ( dateOfCreation ) && tmp . getState ( ) != ChannelState . HUNGUP ) ) { channel = tmp ; dateOfCreation = channel . getDateOfCreation ( ) ; } } } } return channel ;
public class I2CLcdDisplay { /** * private voi */ private void write ( int incomingData ) throws Exception { } }
int tmpData = incomingData ; BitSet bits = fromByte ( ( byte ) tmpData ) ; byte out = ( byte ) ( ( bits . get ( 3 ) ? 1 << d7Bit : 0 << d7Bit ) | ( bits . get ( 2 ) ? 1 << d6Bit : 0 << d6Bit ) | ( bits . get ( 1 ) ? 1 << d5Bit : 0 << d5Bit ) | ( bits . get ( 0 ) ? 1 << d4Bit : 0 << d4Bit ) | ( isBacklight ( ) ? 1 << backlightBit : 0 << backlightBit ) | ( rsFlag ? 1 << rsBit : 0 << rsBit ) | ( eFlag ? 1 << eBit : 0 << eBit ) ) ; // ReMap - Default case where everything just works is /* * 7 backlightBit * 6 rsBit * 5 rwBit * 4 eBit * 3 d7 * 2 d6 * 1 d5 * 0 d4 */ /* * Sainsmart Use case : * 3 backlightBit * 0 rsBit * 1 rwBit * 2 eBit * 7 d7 * 6 d6 * 5 d5 * 4 d4 */ dev . write ( out ) ;
public class AmazonWorkDocsClient { /** * Describes the permissions of a specified resource . * @ param describeResourcePermissionsRequest * @ return Result of the DescribeResourcePermissions operation returned by the service . * @ throws UnauthorizedOperationException * The operation is not permitted . * @ throws UnauthorizedResourceAccessException * The caller does not have access to perform the action on the resource . * @ throws FailedDependencyException * The AWS Directory Service cannot reach an on - premises instance . Or a dependency under the control of the * organization is failing , such as a connected Active Directory . * @ throws ServiceUnavailableException * One or more of the dependencies is unavailable . * @ sample AmazonWorkDocs . DescribeResourcePermissions * @ see < a href = " http : / / docs . aws . amazon . com / goto / WebAPI / workdocs - 2016-05-01 / DescribeResourcePermissions " * target = " _ top " > AWS API Documentation < / a > */ @ Override public DescribeResourcePermissionsResult describeResourcePermissions ( DescribeResourcePermissionsRequest request ) { } }
request = beforeClientExecution ( request ) ; return executeDescribeResourcePermissions ( request ) ;
public class AppsImpl {
    /**
     * Lists all of the user applications.
     *
     * @param listOptionalParameter the object representing the optional parameters to be set before calling this API
     * @throws IllegalArgumentException thrown if parameters fail the validation
     * @throws ErrorResponseException thrown if the request is rejected by server
     * @throws RuntimeException all other wrapped checked exceptions if the request fails to be sent
     * @return the List&lt;ApplicationInfoResponse&gt; object if successful.
     */
    public List<ApplicationInfoResponse> list(ListAppsOptionalParameter listOptionalParameter) {
        // Blocking wrapper over the async variant: waits for the single response and unwraps its body.
        return listWithServiceResponseAsync(listOptionalParameter).toBlocking().single().body();
    }
}
public class AmazonCloudWatchClient { /** * Returns a list of the dashboards for your account . If you include < code > DashboardNamePrefix < / code > , only those * dashboards with names starting with the prefix are listed . Otherwise , all dashboards in your account are listed . * < code > ListDashboards < / code > returns up to 1000 results on one page . If there are more than 1000 dashboards , you * can call < code > ListDashboards < / code > again and include the value you received for < code > NextToken < / code > in the * first call , to receive the next 1000 results . * @ param listDashboardsRequest * @ return Result of the ListDashboards operation returned by the service . * @ throws InvalidParameterValueException * The value of an input parameter is bad or out - of - range . * @ throws InternalServiceException * Request processing has failed due to some unknown error , exception , or failure . * @ sample AmazonCloudWatch . ListDashboards * @ see < a href = " http : / / docs . aws . amazon . com / goto / WebAPI / monitoring - 2010-08-01 / ListDashboards " target = " _ top " > AWS API * Documentation < / a > */ @ Override public ListDashboardsResult listDashboards ( ListDashboardsRequest request ) { } }
request = beforeClientExecution ( request ) ; return executeListDashboards ( request ) ;
public class CMYKColor {
    /**
     * Calculates a color or colors between two colors at a specific increment.
     *
     * @param colorSet1 interpolate from this color
     * @param colorSet2 interpolate to this color
     * @param amt between 0.0 and 1.0
     * @return the calculated color values.
     */
    public static Color lerpColor(ColorSet colorSet1, ColorSet colorSet2, double amt) {
        // Convert both ColorSets to CMYK and delegate to the CMYK-typed overload.
        return lerpColor((CMYKColor) CMYKColor.color(colorSet1), (CMYKColor) CMYKColor.color(colorSet2), amt);
    }
}
public class InvocationStatFactory { /** * 对批量InvocationStat快照进行一个更新 * @ param snapshots InvocationStat */ public static void updateInvocationStats ( List < InvocationStat > snapshots ) { } }
for ( InvocationStat snapshot : snapshots ) { getInvocationStat ( snapshot . getDimension ( ) ) . update ( snapshot ) ; }
public class MySQLProtocol {
    /**
     * Creates a DrizzleQueryResult — precondition is that a result set packet has been read.
     *
     * @param packet the result set packet from the server
     * @return a DrizzleQueryResult
     * @throws java.io.IOException when something goes wrong while reading/writing from the server
     */
    private QueryResult createDrizzleQueryResult(final ResultSetPacket packet) throws IOException, QueryException {
        // Read one column-definition packet per field announced in the result set header.
        final List<ColumnInformation> columnInformation = new ArrayList<ColumnInformation>();
        for (int i = 0; i < packet.getFieldCount(); i++) {
            final RawPacket rawPacket = packetFetcher.getRawPacket();
            final ColumnInformation columnInfo = MySQLFieldPacket.columnInformationFactory(rawPacket);
            columnInformation.add(columnInfo);
        }
        // Consume and discard the packet terminating the column-definition section
        // (presumably the EOF separator — TODO confirm against the protocol spec).
        packetFetcher.getRawPacket();
        final List<List<ValueObject>> valueObjects = new ArrayList<List<ValueObject>>();
        // Read row packets until the server sends an error or an EOF packet.
        while (true) {
            final RawPacket rawPacket = packetFetcher.getRawPacket();
            if (ReadUtil.isErrorPacket(rawPacket)) {
                ErrorPacket errorPacket = (ErrorPacket) ResultPacketFactory.createResultPacket(rawPacket);
                checkIfCancelled();
                throw new QueryException(errorPacket.getMessage(), errorPacket.getErrorNumber(), errorPacket.getSqlState());
            }
            if (ReadUtil.eofIsNext(rawPacket)) {
                // EOF terminates the row stream; its status flags say whether more result sets follow.
                final EOFPacket eofPacket = (EOFPacket) ResultPacketFactory.createResultPacket(rawPacket);
                this.hasMoreResults = eofPacket.getStatusFlags().contains(EOFPacket.ServerStatus.SERVER_MORE_RESULTS_EXISTS);
                checkIfCancelled();
                return new DrizzleQueryResult(columnInformation, valueObjects, eofPacket.getWarningCount());
            }
            // Row packets are decoded differently depending on which wire protocol is in use.
            if (getDatabaseType() == SupportedDatabases.MYSQL) {
                final MySQLRowPacket rowPacket = new MySQLRowPacket(rawPacket, columnInformation);
                valueObjects.add(rowPacket.getRow(packetFetcher));
            } else {
                final DrizzleRowPacket rowPacket = new DrizzleRowPacket(rawPacket, columnInformation);
                valueObjects.add(rowPacket.getRow());
            }
        }
    }
}
public class KafkaSource { /** * This function need to be thread safe since it is called in the Runnable */ private List < WorkUnit > getWorkUnitsForTopic ( KafkaTopic topic , SourceState state , Optional < State > topicSpecificState ) { } }
Timer . Context context = this . metricContext . timer ( "isTopicQualifiedTimer" ) . time ( ) ; boolean topicQualified = isTopicQualified ( topic ) ; context . close ( ) ; List < WorkUnit > workUnits = Lists . newArrayList ( ) ; for ( KafkaPartition partition : topic . getPartitions ( ) ) { WorkUnit workUnit = getWorkUnitForTopicPartition ( partition , state , topicSpecificState ) ; if ( workUnit != null ) { // For disqualified topics , for each of its workunits set the high watermark to be the same // as the low watermark , so that it will be skipped . if ( ! topicQualified ) { skipWorkUnit ( workUnit ) ; } workUnits . add ( workUnit ) ; } } this . partitionsToBeProcessed . addAll ( topic . getPartitions ( ) ) ; return workUnits ;
public class AmazonDynamoDBAsyncClient {
    /**
     * Retrieves a paginated list of table names created by the AWS Account
     * of the caller in the AWS Region (e.g. <code>us-east-1</code>).
     *
     * @param listTablesRequest Container for the necessary parameters to
     *            execute the ListTables operation on AmazonDynamoDB.
     * @return A Java Future object containing the response from the
     *         ListTables service method, as returned by AmazonDynamoDB.
     * @throws AmazonClientException If any internal errors are encountered inside the client while
     *             attempting to make the request or handle the response. For example
     *             if a network connection is not available.
     * @throws AmazonServiceException If an error response is returned by AmazonDynamoDB indicating
     *             either a problem with the data in the request, or a server side issue.
     */
    public Future<ListTablesResult> listTablesAsync(final ListTablesRequest listTablesRequest)
            throws AmazonServiceException, AmazonClientException {
        // Delegate the synchronous call to the executor; the Future surfaces any exception on get().
        return executorService.submit(new Callable<ListTablesResult>() {
            public ListTablesResult call() throws Exception {
                return listTables(listTablesRequest);
            }
        });
    }
}
public class Response { /** * If Content - Type header is not set , it is set to " application / octet - stream " . * @ param byteBuf Will be released */ public ChannelFuture respondBinary ( ByteBuf byteBuf ) throws Exception { } }
if ( HttpUtil . isTransferEncodingChunked ( response ) ) { respondHeadersOnlyForFirstChunk ( ) ; return channel . writeAndFlush ( new DefaultHttpContent ( byteBuf ) ) ; } else { if ( ! response . headers ( ) . contains ( CONTENT_TYPE ) ) response . headers ( ) . set ( CONTENT_TYPE , "application/octet-stream" ) ; response . headers ( ) . set ( CONTENT_LENGTH , byteBuf . readableBytes ( ) ) ; response . content ( ) . writeBytes ( byteBuf ) ; return respond ( ) ; }
public class SqlLoaderImpl { /** * 指定されたパッケージ以下のSQLを順次読み込みする < br > * 文末の " / " は削除される < br > * @ param packageName パッケージ名を格納するStringBuilder * @ param dir 探索対象ディレクトリ * @ return SQL識別子をキーとしたSQL文のMap * @ throws IOException ファイルアクセスに失敗した場合 */ private ConcurrentHashMap < String , String > load ( final StringBuilder packageName , final File dir ) throws IOException { } }
LOG . debug ( "Loading SQL template.[{}]" , packageName ) ; ConcurrentHashMap < String , String > sqlMap = new ConcurrentHashMap < > ( ) ; File [ ] files = dir . listFiles ( ) ; for ( File file : files ) { String fileName = file . getName ( ) ; if ( file . isDirectory ( ) ) { sqlMap . putAll ( load ( makeNewPackageName ( packageName , file ) , file ) ) ; } else if ( fileName . toLowerCase ( ) . endsWith ( fileExtension ) ) { String sql = trimSlash ( read ( new BufferedReader ( new InputStreamReader ( new FileInputStream ( file ) , getSqlEncoding ( ) ) ) ) ) ; String sqlName = makeSqlName ( packageName , fileName ) ; sqlMap . put ( sqlName , sql ) ; LOG . trace ( "Loaded SQL template.[{}]" , fileName ) ; LOG . trace ( "Add SQL template.[{}],[{}]" , sqlName , sql ) ; } } return sqlMap ;
public class DirectedAcyclicGraph { /** * Sorts a given set of payloads topologically based on the DAG . This method requires all the * payloads to be in the DAG . * @ param payloads the set of input payloads * @ return the payloads after topological sort */ public List < T > sortTopologically ( Set < T > payloads ) { } }
List < T > result = new ArrayList < > ( ) ; Set < T > input = new HashSet < > ( payloads ) ; Deque < DirectedAcyclicGraphNode < T > > toVisit = new ArrayDeque < > ( mRoots ) ; while ( ! toVisit . isEmpty ( ) ) { DirectedAcyclicGraphNode < T > visit = toVisit . removeFirst ( ) ; T payload = visit . getPayload ( ) ; if ( input . remove ( payload ) ) { result . add ( visit . getPayload ( ) ) ; } toVisit . addAll ( visit . getChildren ( ) ) ; } Preconditions . checkState ( input . isEmpty ( ) , "Not all the given payloads are in the DAG: " , input ) ; return result ;
public class CloseableExecutorService {
    /**
     * Closes any tasks currently in progress.
     */
    @Override
    public void close() {
        // Mark the service closed so no new work is accepted.
        isOpen.set(false);
        Iterator<Future<?>> iterator = futures.iterator();
        while (iterator.hasNext()) {
            Future<?> future = iterator.next();
            // Remove the future from tracking before attempting cancellation.
            iterator.remove();
            // Only warn when the task is still live and refuses to cancel.
            if (!future.isDone() && !future.isCancelled() && !future.cancel(true)) {
                log.warn("Could not cancel " + future);
            }
        }
        if (shutdownOnClose) {
            executorService.shutdownNow();
        }
    }
}
public class DRFModelV3 {
    /**
     * Version- and schema-specific filling into the impl: builds a DRFModel
     * from this schema's parameters.
     */
    @Override
    public DRFModel createImpl() {
        DRFV3.DRFParametersV3 p = this.parameters;
        DRFModel.DRFParameters parms = p.createImpl();
        // Output is created empty (null) here; it is filled in elsewhere.
        return new DRFModel(model_id.key(), parms, new DRFModel.DRFOutput(null));
    }
}
public class ReentrantTransactionDispatcher {
    /**
     * Acquire transaction with a single permit in a thread. Transactions are acquired reentrantly,
     * i.e. with respect to transactions already acquired in the thread.
     *
     * @return the number of acquired permits, identically equal to 1.
     */
    int acquireTransaction(@NotNull final Thread thread) {
        try (CriticalSection ignored = criticalSection.enter()) {
            final int currentThreadPermits = getThreadPermitsToAcquire(thread);
            // Threads that already hold permits wait on the nested queue; first-time
            // acquirers go through the regular queue.
            waitForPermits(thread, currentThreadPermits > 0 ? nestedQueue : regularQueue, 1, currentThreadPermits);
        }
        return 1;
    }
}
public class FloatAttribute {
    /**
     * Returns a calculated attribute representing this attribute plus the given
     * byte attribute (ByteAttribute operand overload).
     */
    public FloatAttribute plus(com.gs.fw.finder.attribute.ByteAttribute attribute) {
        // Build an addition calculator over the two operands and wrap it as a float attribute.
        return FloatNumericType.getInstance().createCalculatedAttribute(
                FloatNumericType.getInstance().createAdditionCalculator(this, (ByteAttribute) attribute));
    }
}
public class TranSynchronization { /** * Upon successful transaction commit status , store the value of the committed metrics . * Upon any other status value roll back the metrics to the last committed value . * @ see javax . transaction . Synchronization # afterCompletion ( int ) */ @ Override public void afterCompletion ( int status ) { } }
logger . log ( Level . FINE , "The status of the transaction commit is: " + status ) ; if ( status == Status . STATUS_COMMITTED ) { // Save the metrics object after a successful commit runtimeStepExecution . setCommittedMetrics ( ) ; } else { // status = 4 = STATUS _ ROLLEDBACK ; runtimeStepExecution . rollBackMetrics ( ) ; }
public class Ifc2x3tc1PackageImpl {
    /**
     * Returns the EClass for IfcWindowStyle, lazily resolved from the EMF package
     * registry on first access.
     * <!-- begin-user-doc -->
     * <!-- end-user-doc -->
     * @generated
     */
    public EClass getIfcWindowStyle() {
        if (ifcWindowStyleEClass == null) {
            // Index 648 is the generated classifier position of IfcWindowStyle in this package.
            ifcWindowStyleEClass = (EClass) EPackage.Registry.INSTANCE
                    .getEPackage(Ifc2x3tc1Package.eNS_URI).getEClassifiers().get(648);
        }
        return ifcWindowStyleEClass;
    }
}
public class AmazonRDSClient { /** * Modifies the properties of an endpoint in an Amazon Aurora DB cluster . * < note > * This action only applies to Aurora DB clusters . * < / note > * @ param modifyDBClusterEndpointRequest * @ return Result of the ModifyDBClusterEndpoint operation returned by the service . * @ throws InvalidDBClusterStateException * The requested operation can ' t be performed while the cluster is in this state . * @ throws InvalidDBClusterEndpointStateException * The requested operation can ' t be performed on the endpoint while the endpoint is in this state . * @ throws DBClusterEndpointNotFoundException * The specified custom endpoint doesn ' t exist . * @ throws DBInstanceNotFoundException * < i > DBInstanceIdentifier < / i > doesn ' t refer to an existing DB instance . * @ throws InvalidDBInstanceStateException * The DB instance isn ' t in a valid state . * @ sample AmazonRDS . ModifyDBClusterEndpoint * @ see < a href = " http : / / docs . aws . amazon . com / goto / WebAPI / rds - 2014-10-31 / ModifyDBClusterEndpoint " target = " _ top " > AWS * API Documentation < / a > */ @ Override public ModifyDBClusterEndpointResult modifyDBClusterEndpoint ( ModifyDBClusterEndpointRequest request ) { } }
request = beforeClientExecution ( request ) ; return executeModifyDBClusterEndpoint ( request ) ;
public class GradientActivity {
    /**
     * This is where you specify custom camera settings. See
     * {@link boofcv.android.camera2.SimpleCamera2Activity}'s JavaDoc for more functions
     * which you can override.
     *
     * @param captureRequestBuilder Used to configure the camera.
     */
    @Override
    protected void configureCamera(CameraDevice device, CameraCharacteristics characteristics,
                                   CaptureRequest.Builder captureRequestBuilder) {
        // Continuous video autofocus and automatic exposure.
        captureRequestBuilder.set(CaptureRequest.CONTROL_AF_MODE, CaptureRequest.CONTROL_AF_MODE_CONTINUOUS_VIDEO);
        captureRequestBuilder.set(CaptureRequest.CONTROL_AE_MODE, CaptureRequest.CONTROL_AE_MODE_ON);
    }
}
public class CmsADEManager {
    /**
     * Returns the settings configured for the given formatter.<p>
     *
     * @param cms the cms context
     * @param mainFormatter the formatter
     * @param res the element resource
     * @param locale the content locale
     * @param req the current request, if available
     * @return the settings configured for the given formatter
     */
    public Map<String, CmsXmlContentProperty> getFormatterSettings(CmsObject cms, I_CmsFormatterBean mainFormatter,
            CmsResource res, Locale locale, ServletRequest req) {
        Map<String, CmsXmlContentProperty> result = new LinkedHashMap<String, CmsXmlContentProperty>();
        Visibility defaultVisibility = Visibility.elementAndParentIndividual;
        if (mainFormatter != null) {
            // Settings of the main formatter: include everything that is not parent-only.
            for (Entry<String, CmsXmlContentProperty> entry : mainFormatter.getSettings().entrySet()) {
                Visibility visibility = entry.getValue().getVisibility(defaultVisibility);
                if (!(visibility.equals(Visibility.parentShared) || visibility.equals(Visibility.parentIndividual))) {
                    result.put(entry.getKey(), entry.getValue());
                }
            }
            if (mainFormatter.hasNestedFormatterSettings()) {
                List<I_CmsFormatterBean> nestedFormatters = getNestedFormatters(cms, res, locale, req);
                if (nestedFormatters != null) {
                    for (I_CmsFormatterBean formatter : nestedFormatters) {
                        for (Entry<String, CmsXmlContentProperty> entry : formatter.getSettings().entrySet()) {
                            Visibility visibility = entry.getValue().getVisibility(defaultVisibility);
                            switch (visibility) {
                                case parentShared:
                                case elementAndParentShared:
                                    // Shared settings keep their original key across nested formatters.
                                    result.put(entry.getKey(), entry.getValue());
                                    break;
                                case elementAndParentIndividual:
                                case parentIndividual:
                                    // Individual settings are namespaced by formatter id to avoid collisions.
                                    String settingName = formatter.getId() + "_" + entry.getKey();
                                    CmsXmlContentProperty settingConf = entry.getValue().withName(settingName);
                                    result.put(settingName, settingConf);
                                    break;
                                default:
                                    break;
                            }
                        }
                    }
                }
            }
        }
        return result;
    }
}
public class AmazonQuickSightClient { /** * Lists the Amazon QuickSight groups that an Amazon QuickSight user is a member of . * The permission resource is * < code > arn : aws : quicksight : us - east - 1 : < i > & lt ; aws - account - id & gt ; < / i > : user / default / < i > & lt ; user - name & gt ; < / i > < / code > . * The response is a one or more group objects . * < b > CLI Sample : < / b > * < code > aws quicksight list - user - groups - \ - user - name = Pat - \ - aws - account - id = 111122223333 - \ - namespace = default - \ - region = us - east - 1 < / code > * @ param listUserGroupsRequest * @ return Result of the ListUserGroups operation returned by the service . * @ throws AccessDeniedException * You don ' t have access to this . The provided credentials couldn ' t be validated . You might not be * authorized to carry out the request . Ensure that your account is authorized to use the Amazon QuickSight * service , that your policies have the correct permissions , and that you are using the correct access keys . * @ throws InvalidParameterValueException * One or more parameters don ' t have a valid value . * @ throws ResourceNotFoundException * One or more resources can ' t be found . * @ throws ThrottlingException * Access is throttled . * @ throws InternalFailureException * An internal failure occurred . * @ throws ResourceUnavailableException * This resource is currently unavailable . * @ sample AmazonQuickSight . ListUserGroups * @ see < a href = " http : / / docs . aws . amazon . com / goto / WebAPI / quicksight - 2018-04-01 / ListUserGroups " target = " _ top " > AWS API * Documentation < / a > */ @ Override public ListUserGroupsResult listUserGroups ( ListUserGroupsRequest request ) { } }
request = beforeClientExecution ( request ) ; return executeListUserGroups ( request ) ;
public class AbstractXMLReader { /** * / * - - - - - [ Properties ] - - - - - */ @ Override public void setProperty ( String name , Object value ) throws SAXNotRecognizedException , SAXNotSupportedException { } }
if ( ! _setProperty ( name , value ) ) throw new SAXNotRecognizedException ( name ) ;
public class PCA9685GpioProvider { /** * The built - in Oscillator runs at ~ 25MHz . For better accuracy user can provide a correction * factor to meet desired frequency . < p > * < b > Note : < / b > correction is limited to a certain degree because the calculated prescale value has to be * rounded to an integer value ! * < b > Example : < / b > < br > * target freq : 50Hz < br > * actual freq : 52.93Hz < br > * correction factor : 52.93 / 50 = 1.0586 < br > * @ param targetFrequency desired frequency * @ param frequencyCorrectionFactor ' actual frequency ' / ' target frequency ' */ public void setFrequency ( BigDecimal targetFrequency , BigDecimal frequencyCorrectionFactor ) { } }
validateFrequency ( targetFrequency ) ; frequency = targetFrequency ; periodDurationMicros = calculatePeriodDuration ( ) ; int prescale = calculatePrescale ( frequencyCorrectionFactor ) ; int oldMode ; try { oldMode = device . read ( PCA9685A_MODE1 ) ; int newMode = ( oldMode & 0x7F ) | 0x10 ; // sleep device . write ( PCA9685A_MODE1 , ( byte ) newMode ) ; // go to sleep device . write ( PCA9685A_PRESCALE , ( byte ) prescale ) ; device . write ( PCA9685A_MODE1 , ( byte ) oldMode ) ; Thread . sleep ( 1 ) ; device . write ( PCA9685A_MODE1 , ( byte ) ( oldMode | 0x80 ) ) ; } catch ( IOException e ) { throw new RuntimeException ( "Unable to set prescale value [" + prescale + "]" , e ) ; } catch ( InterruptedException e ) { throw new RuntimeException ( e ) ; }
public class SlotPoolImpl {
    /**
     * Start the slot pool to accept RPC calls.
     *
     * @param jobMasterId The necessary leader id for running the job.
     * @param newJobManagerAddress for the slot requests which are sent to the resource manager
     * @param componentMainThreadExecutor The main thread executor for the job master's main thread.
     */
    public void start(
            @Nonnull JobMasterId jobMasterId,
            @Nonnull String newJobManagerAddress,
            @Nonnull ComponentMainThreadExecutor componentMainThreadExecutor) throws Exception {
        this.jobMasterId = jobMasterId;
        this.jobManagerAddress = newJobManagerAddress;
        this.componentMainThreadExecutor = componentMainThreadExecutor;
        // Periodically release slots that have been idle longer than the timeout.
        scheduleRunAsync(this::checkIdleSlot, idleSlotTimeout);
        if (log.isDebugEnabled()) {
            // Only schedule the periodic status log when debug logging is active.
            scheduleRunAsync(this::scheduledLogStatus, STATUS_LOG_INTERVAL_MS, TimeUnit.MILLISECONDS);
        }
    }
}
public class FileCache {
    /**
     * Schedules deletion of the local temporary file, leaving a 5 second delay
     * before it is actually cleared.
     *
     * @param name the name of the cached file
     * @param jobID the job the file belongs to
     */
    public void deleteTmpFile(String name, JobID jobID) {
        // NOTE(review): raw ImmutablePair and a possibly-null count lookup —
        // confirm that (jobID, name) is always present in the count map.
        DeleteProcess dp = new DeleteProcess(name, jobID, count.get(new ImmutablePair(jobID, name)));
        executorService.schedule(dp, 5000L, TimeUnit.MILLISECONDS);
    }
}
public class FileSystemSizeMarshaller {
    /**
     * Marshalls the given FileSystemSize into the protocol marshaller, one binding per field.
     *
     * @param fileSystemSize the object to marshall; must not be null
     * @param protocolMarshaller the target marshaller
     * @throws SdkClientException if the argument is null or marshalling fails
     */
    public void marshall(FileSystemSize fileSystemSize, ProtocolMarshaller protocolMarshaller) {
        if (fileSystemSize == null) {
            throw new SdkClientException("Invalid argument passed to marshall(...)");
        }
        try {
            protocolMarshaller.marshall(fileSystemSize.getValue(), VALUE_BINDING);
            protocolMarshaller.marshall(fileSystemSize.getTimestamp(), TIMESTAMP_BINDING);
            protocolMarshaller.marshall(fileSystemSize.getValueInIA(), VALUEINIA_BINDING);
            protocolMarshaller.marshall(fileSystemSize.getValueInStandard(), VALUEINSTANDARD_BINDING);
        } catch (Exception e) {
            // Wrap any marshalling failure in the SDK's client exception type, preserving the cause.
            throw new SdkClientException("Unable to marshall request to JSON: " + e.getMessage(), e);
        }
    }
}
public class TimeZone {
    /**
     * Returns a new SimpleTimeZone for an ID of the form "GMT[+|-]hh[[:]mm]", or null.
     */
    private static TimeZone getCustomTimeZone(String id) {
        Matcher m = NoImagePreloadHolder.CUSTOM_ZONE_ID_PATTERN.matcher(id);
        if (!m.matches()) {
            return null;
        }
        int hour;
        int minute = 0;
        try {
            hour = Integer.parseInt(m.group(1));
            // group(3) carries the optional minutes component.
            if (m.group(3) != null) {
                minute = Integer.parseInt(m.group(3));
            }
        } catch (NumberFormatException impossible) {
            // The pattern only matches digit runs, so parsing cannot fail.
            throw new AssertionError(impossible);
        }
        if (hour < 0 || hour > 23 || minute < 0 || minute > 59) {
            return null;
        }
        // The sign character sits at index 3 of "GMT[+|-]...".
        char sign = id.charAt(3);
        int raw = (hour * 3600000) + (minute * 60000);
        if (sign == '-') {
            raw = -raw;
        }
        // Normalize the ID to the canonical "GMT±hh:mm" form.
        String cleanId = String.format(Locale.ROOT, "GMT%c%02d:%02d", sign, hour, minute);
        return new SimpleTimeZone(raw, cleanId);
    }
}
public class DataTypeParser { /** * Returns a long value from the input token stream assuming the long is bracketed with parenthesis . * @ param tokens * @ param dataType * @ return the long value */ protected long parseBracketedLong ( DdlTokenStream tokens , DataType dataType ) { } }
consume ( tokens , dataType , false , L_PAREN ) ; String value = consume ( tokens , dataType , false ) ; consume ( tokens , dataType , false , R_PAREN ) ; return parseLong ( value ) ;
public class Bean {
    /**
     * Reset the state timeout timer. If this timer fires, the client has waited too long
     * for a state update from the Bean and an error will be fired.
     */
    private void resetSketchStateTimeout() {
        TimerTask onTimeout = new TimerTask() {
            @Override
            public void run() {
                returnUploadError(BeanError.STATE_TIMEOUT);
            }
        };
        // Cancel any running timeout before arming a fresh one.
        stopSketchStateTimeout();
        sketchStateTimeout = new Timer();
        sketchStateTimeout.schedule(onTimeout, SKETCH_UPLOAD_STATE_TIMEOUT);
    }
}
public class Flow {
    /**
     * Adds a history as an extra to an Intent.
     *
     * @param intent the Intent to augment
     * @param history the history to attach
     * @param parceler converts the history's keys to Parcelables
     */
    public static void addHistory(@NonNull Intent intent, @NonNull History history, @NonNull KeyParceler parceler) {
        // Thin facade over the internal lifecycle integration helper.
        InternalLifecycleIntegration.addHistoryToIntent(intent, history, parceler);
    }
}
public class CmsJlanNetworkFile {
    /**
     * Deletes the file.<p>
     *
     * @throws IOException if something goes wrong
     */
    public void delete() throws IOException {
        try {
            load(false);
            ensureLock();
            m_cms.deleteResource(m_cms.getSitePath(m_resource), CmsResource.DELETE_PRESERVE_SIBLINGS);
            // Only unlock when the resource was not newly created in this session.
            if (!m_resource.getState().isNew()) {
                try {
                    m_cms.unlockResource(m_cms.getSitePath(m_resource));
                } catch (CmsException e) {
                    // Unlock failure is non-fatal: log and continue.
                    LOG.warn(e.getLocalizedMessage(), e);
                }
            }
        } catch (CmsException e) {
            // Translate CMS exceptions to the IOException contract of this interface.
            throw CmsJlanDiskInterface.convertCmsException(e);
        }
    }
}
public class Models {
    /**
     * Summarize fields which are generic to water.Model.
     */
    private static void summarizeModelCommonFields(ModelSummary summary, Model model) {
        String[] names = model._names;
        summary.warnings = model.warnings;
        summary.model_algorithm = model.getClass().toString(); // fallback only
        // model.job() is a local copy; on multinode clusters we need to get from the DKV
        Key job_key = ((Job) model.job()).self();
        if (null == job_key)
            throw H2O.fail("Null job key for model: " + (model == null ? "null model" : model._key)); // later when we deserialize models from disk we'll relax this constraint
        Job job = DKV.get(job_key).get();
        summary.state = job.getState();
        summary.model_category = model.getModelCategory();
        UniqueId unique_id = model.getUniqueId();
        summary.id = unique_id.getId();
        summary.key = unique_id.getKey();
        summary.creation_epoch_time_millis = unique_id.getCreationEpochTimeMillis();
        summary.training_duration_in_ms = model.training_duration_in_ms;
        // The response column is, by convention, the last entry of _names.
        summary.response_column_name = names[names.length - 1];
        for (int i = 0; i < names.length - 1; i++)
            summary.input_column_names.add(names[i]);
        // Ugh.
        VarImp vi = model.varimp();
        if (null != vi) {
            summary.variable_importances = new LinkedHashMap();
            summary.variable_importances.put("varimp", vi.varimp);
            summary.variable_importances.put("variables", vi.getVariables());
            summary.variable_importances.put("method", vi.method);
            summary.variable_importances.put("max_var", vi.max_var);
            summary.variable_importances.put("scaled", vi.scaled());
        }
    }
}
public class LogViewSerialization {
    /**
     * Serializes the log view under the given path.
     *
     * @param logView Log view to serialize.
     * @param path Target path of the serialized log view.
     * @throws IOException If the log view can't be written under the given path.
     */
    public static void write(LogView logView, String path) throws IOException {
        String xml = xstream.toXML(logView);
        // NOTE(review): FileWriter uses the platform default charset; if the XML
        // declares UTF-8 this can mis-encode on non-UTF-8 platforms — confirm and
        // consider an explicit UTF-8 writer.
        try (BufferedWriter out = new BufferedWriter(new FileWriter(path))) {
            out.write(xml);
        }
    }
}
public class KeyDecoder { /** * Decodes the given byte array which was encoded by { @ link * KeyEncoder # encodeSingleDesc } . Always returns a new byte array instance . * @ param prefixPadding amount of extra bytes to skip from start of encoded byte array * @ param suffixPadding amount of extra bytes to skip at end of encoded byte array */ public static byte [ ] decodeSingleDesc ( byte [ ] src , int prefixPadding , int suffixPadding ) throws CorruptEncodingException { } }
try { int length = src . length - suffixPadding - prefixPadding ; if ( length == 0 ) { return EMPTY_BYTE_ARRAY ; } byte [ ] dst = new byte [ length ] ; while ( -- length >= 0 ) { dst [ length ] = ( byte ) ( ~ src [ prefixPadding + length ] ) ; } return dst ; } catch ( IndexOutOfBoundsException e ) { throw new CorruptEncodingException ( null , e ) ; }
public class TabularDataExtractor {
    /**
     * Converts a path segment to a key object of the type expected by the tabular
     * row's composite type. See also #97 for details.
     *
     * @param rowType the composite type describing the row
     * @param key the key name whose open type determines the conversion
     * @param value the string value to convert
     * @return the converted key object
     * @throws IllegalArgumentException if the key type is unsupported or the value cannot be converted
     */
    private Object getKey(CompositeType rowType, String key, String value) {
        OpenType keyType = rowType.getType(key);
        // SimpleType instances are singletons, so identity comparison is safe here.
        if (SimpleType.STRING == keyType) {
            return value;
        } else if (SimpleType.INTEGER == keyType) {
            return Integer.parseInt(value);
        } else if (SimpleType.LONG == keyType) {
            return Long.parseLong(value);
        } else if (SimpleType.SHORT == keyType) {
            return Short.parseShort(value);
        } else if (SimpleType.BYTE == keyType) {
            return Byte.parseByte(value);
        } else if (SimpleType.OBJECTNAME == keyType) {
            try {
                return new ObjectName(value);
            } catch (MalformedObjectNameException e) {
                throw new IllegalArgumentException("Can not convert " + value + " to an ObjectName", e);
            }
        } else {
            throw new IllegalArgumentException(
                    "All keys must be a string, integer, long, short, byte or ObjectName type for accessing TabularData via a path. "
                            + "This is not the case for '" + key + "' which is of type " + keyType);
        }
    }
}
public class FileLog {
    /**
     * This method will check to see if the supplied parameters match the settings on the
     * <code>oldLog</code>; if they do then the <code>oldLog</code> is returned (after being
     * updated), otherwise a new FileLogHolder will be created.
     *
     * @param oldLog The previous FileLogHolder that may or may not be replaced by a new one,
     *            may be <code>null</code> (will cause a new instance to be created)
     * @param logDirectory Directory in which to store created log files
     * @param newFileName File name for new log: this will be split into a name and extension
     * @param maxFiles New maximum number of log files. If 0, log files won't be pruned.
     * @param maxSizeBytes New maximum log file size in bytes. If 0, log files won't be rolled.
     * @return a log holder. If all values are the same, the old one is returned, otherwise a
     *         new log holder is created.
     */
    public static FileLog createFileLogHolder(FileLog oldLog, File logDirectory, String newFileName,
            int maxFiles, long maxSizeBytes) {
        final FileLog logHolder;
        // We're only supporting names in the log directory.
        // Our configurations encourage use of forward slash on all platforms.
        int lio = newFileName.lastIndexOf("/");
        if (lio > 0) {
            newFileName = newFileName.substring(lio + 1);
        }
        if (File.separatorChar != '/') {
            // Go sniffing for other separators where we should (windows)
            lio = newFileName.lastIndexOf(File.separatorChar);
            if (lio > 0) {
                newFileName = newFileName.substring(lio + 1);
            }
        }
        final String fileName;
        final String fileExtension;
        // Find the name vs. extension: name = file, extension = .log
        final int dio = newFileName.lastIndexOf(".");
        if (dio > 0) {
            fileName = newFileName.substring(0, dio);
            fileExtension = newFileName.substring(dio);
        } else {
            fileName = newFileName;
            fileExtension = "";
        }
        // IF there are changes to the rolling behavior, it will show up in a change to either
        // maxFiles or maxBytes
        if (oldLog != null) {
            logHolder = oldLog;
            logHolder.update(logDirectory, fileName, fileExtension, maxFiles, maxSizeBytes);
        } else {
            // Send to bit bucket until the file is created (true -- create/replace if needed).
            logHolder = new FileLog(logDirectory, fileName, fileExtension, maxFiles, maxSizeBytes);
        }
        return logHolder;
    }
}
public class FDBigInt {
    /**
     * Subtract one FDBigInt from another and return a new FDBigInt.
     * The caller must guarantee <code>this &gt;= other</code>; assertions
     * check that the result is non-negative (no borrow out of the top word).
     */
    public FDBigInt sub(FDBigInt other) {
        int r[] = new int[this.nWords];
        int i;
        int n = this.nWords;
        int m = other.nWords;
        // Count of consecutive zero words at the high end of the result so
        // far; used to trim leading zero words in the returned value.
        int nzeros = 0;
        long c = 0L; // running borrow accumulator
        for (i = 0; i < n; i++) {
            // Mask to treat each 32-bit word as unsigned in 64-bit arithmetic.
            c += (long) this.data[i] & 0xffffffffL;
            if (i < m) {
                c -= (long) other.data[i] & 0xffffffffL;
            }
            if ((r[i] = (int) c) == 0)
                nzeros++;
            else
                nzeros = 0;
            c >>= 32; // signed shift: propagates the borrow (0 or -1)
        }
        assert c == 0L : c; // borrow out of subtract
        assert dataInRangeIsZero(i, m, other); // negative result of subtract
        return new FDBigInt(r, n - nzeros);
    }
}
public class DFSFile { /** * Provides a detailed string for this file * @ return the string formatted as * < tt > & lt ; filename & gt ; ( & lt ; size & gt ; , r & lt ; replication & gt ; ) < / tt > */ public String toDetailedString ( ) { } }
final String [ ] units = { "b" , "Kb" , "Mb" , "Gb" , "Tb" } ; int unit = 0 ; double l = this . length ; while ( ( l >= 1024.0 ) && ( unit < units . length ) ) { unit += 1 ; l /= 1024.0 ; } return String . format ( "%s (%.1f %s, r%d)" , super . toString ( ) , l , units [ unit ] , this . replication ) ;
public class AbstractRadial {
    /**
     * Enables/disables the visibility of the arc that represents the range of
     * measured values, then re-initializes and repaints the gauge at its
     * current size so the change becomes visible immediately.
     *
     * @param RANGE_OF_MEASURED_VALUES_VISIBLE true to show the arc, false to hide it
     */
    public void setRangeOfMeasuredValuesVisible(final boolean RANGE_OF_MEASURED_VALUES_VISIBLE) {
        getModel().setRangeOfMeasuredValuesVisible(RANGE_OF_MEASURED_VALUES_VISIBLE);
        // Rebuild the gauge at the current inner bounds before repainting.
        init(getInnerBounds().width, getInnerBounds().height);
        repaint(getInnerBounds());
    }
}
public class WhileyFileParser {
    /**
     * Parse an array type, which is of the form:
     *
     * <pre>
     * ArrayType ::= Type '[' ']'
     * </pre>
     *
     * @return the parsed type; nested "[]" pairs yield nested array types
     */
    private Type parseArrayType(EnclosingScope scope) {
        int start = index; // remember token position for source-location annotation
        Type element = parseBaseType(scope);
        // Each matched "[]" pair wraps the element in another array
        // dimension, so "T[][]" parses as Array(Array(T)).
        while (tryAndMatch(true, LeftSquare) != null) {
            match(RightSquare);
            element = annotateSourceLocation(new Type.Array(element), start);
        }
        return element;
    }
}
public class HttpOutboundServiceContextImpl {
    /**
     * Send the given body buffers for the outgoing request synchronously.
     * If chunked encoding is set, then each call to this method will be
     * considered a "chunk" and encoded as such. If the message is
     * Content-Length defined, then the buffers will simply be sent out with
     * no modifications.
     * Note: if headers have not already been sent, then the first call to
     * this method will send the headers.
     *
     * @param body the body buffers to send
     * @throws IOException if a socket error occurs
     * @throws MessageSentException if a finishMessage API was already used
     */
    @Override
    public void sendRequestBody(WsByteBuffer[] body) throws IOException, MessageSentException {
        if (TraceComponent.isAnyTracingEnabled() && tc.isEntryEnabled()) {
            Tr.entry(tc, "sendRequestBody(sync)");
        }
        // A finished message cannot accept further body data.
        if (isMessageSent()) {
            throw new MessageSentException("Message already sent");
        }
        // If headers haven't been sent, mark this as a partial-body transfer
        // so the headers go out with this first body write.
        if (!headersSent()) {
            if (TraceComponent.isAnyTracingEnabled() && tc.isDebugEnabled()) {
                Tr.debug(tc, "Setting partial body true");
            }
            setPartialBody(true);
        }
        getLink().setAllowReconnect(true);
        try {
            sendOutgoing(body, getRequestImpl());
        } catch (IOException e) {
            // no FFDC necessary
            // NOTE(review): reConnect(e) presumably retries the send on a new
            // connection or rethrows -- confirm against the link implementation.
            reConnect(e);
        }
        if (TraceComponent.isAnyTracingEnabled() && tc.isEntryEnabled()) {
            Tr.exit(tc, "sendRequestBody(sync)");
        }
    }
}
public class Lifecycle { /** * Adds a handler to the Lifecycle and starts it if the lifecycle has already been started . * @ param handler The hander to add to the lifecycle * @ param stage The stage to add the lifecycle at * @ throws Exception an exception thrown from handler . start ( ) . If an exception is thrown , the handler is * not * added */ public void addMaybeStartHandler ( Handler handler , Stage stage ) throws Exception { } }
synchronized ( handlers ) { if ( started . get ( ) ) { if ( currStage == null || stage . compareTo ( currStage ) < 1 ) { handler . start ( ) ; } } handlers . get ( stage ) . add ( handler ) ; }
public class HostVsanInternalSystem {
    /**
     * Query VSAN system statistics. This is a low-level API that gathers low
     * level statistic counters from the system. The details of the counters
     * remain undocumented and unsupported at this point, and this API remains
     * internal. The data for this API call mostly comes from VSI, but also
     * other tools like memstats. The caller can control which counters are
     * being retrieved by providing a list of labels.
     *
     * @param labels list of labels of counters to retrieve
     * @return JSON string with the results
     * @throws RuntimeFault
     * @throws RemoteException
     * @since 6.0
     */
    public String queryVsanStatistics(String[] labels) throws RuntimeFault, RemoteException {
        // Straight delegation to the web-service stub for this managed object.
        return getVimService().queryVsanStatistics(getMOR(), labels);
    }
}
public class DriverFactory { /** * OperatorDesc can describe local reasources , URL , loaded resources and dynamic resources like * groovy code . This method instantiates an Operator based on the OperatorDesc . * @ param operatorDesc * @ return */ public static Operator buildOperator ( OperatorDesc operatorDesc , MetricRegistry metricRegistry , String planPath , FeedPartition feedPartition ) { } }
Operator operator = null ; NitFactory nitFactory = new NitFactory ( ) ; NitDesc nitDesc = nitDescFromDynamic ( operatorDesc ) ; try { if ( nitDesc . getSpec ( ) == NitDesc . NitSpec . GROOVY_CLOSURE ) { operator = new GroovyOperator ( ( Closure ) nitFactory . construct ( nitDesc ) ) ; } else { operator = nitFactory . construct ( nitDesc ) ; } } catch ( NitException e ) { throw new RuntimeException ( e ) ; } operator . setProperties ( operatorDesc . getParameters ( ) ) ; operator . setMetricRegistry ( metricRegistry ) ; operator . setPartitionId ( feedPartition . getPartitionId ( ) ) ; String myName = operatorDesc . getName ( ) ; if ( myName == null ) { myName = operatorDesc . getTheClass ( ) ; if ( myName . indexOf ( "." ) > - 1 ) { String [ ] parts = myName . split ( "\\." ) ; myName = parts [ parts . length - 1 ] ; } } operator . setPath ( planPath + "." + myName ) ; return operator ;
public class FileUtils {
    /**
     * Reads the entire contents of a file and returns it as a single string.
     *
     * @param file path of the file to read
     * @param encoding character encoding used to decode the file's bytes
     * @return the decoded file contents (empty string for an empty file)
     * @throws Exception if the file cannot be read or the encoding is unsupported
     */
    public static String fileToString(String file, String encoding) throws Exception {
        ByteArrayOutputStream baos = new ByteArrayOutputStream();
        // try-with-resources: the original leaked the FileInputStream when
        // read() threw, because close() was not in a finally block.
        try (FileInputStream fis = new FileInputStream(file)) {
            byte[] buffer = new byte[10240];
            int len;
            while ((len = fis.read(buffer, 0, buffer.length)) > 0) {
                baos.write(buffer, 0, len);
            }
        }
        return baos.toString(encoding);
    }
}
public class CurrencyHelper {
    /**
     * Try to parse a string value formatted by the {@link NumberFormat}
     * object returned from {@link #getCurrencyFormat(ECurrency)}, e.g.
     * <code>&euro;5,00</code>.
     *
     * @param eCurrency The currency it is about. If <code>null</code> is
     *        provided {@link #DEFAULT_CURRENCY} is used instead.
     * @param sTextValue The string value. It will be parsed unmodified!
     * @param aDefault The default value to be used in case parsing fails.
     *        May be <code>null</code>.
     * @return The {@link BigDecimal} value matching the string value or the
     *         passed default value.
     */
    @Nullable
    public static BigDecimal parseCurrencyFormatUnchanged(@Nullable final ECurrency eCurrency,
            @Nullable final String sTextValue, @Nullable final BigDecimal aDefault) {
        final PerCurrencySettings aPCS = getSettings(eCurrency);
        // Parse using the currency's display format and its rounding mode.
        final DecimalFormat aCurrencyFormat = aPCS.getCurrencyFormat();
        return parseCurrency(sTextValue, aCurrencyFormat, aDefault, aPCS.getRoundingMode());
    }
}
public class StaticFilesConfiguration {
    /**
     * Configures the location for external static resources. Only the first
     * successful/attempted configuration takes effect; subsequent calls are
     * no-ops once the flag is set.
     *
     * @param folder the location; must be an existing folder
     */
    public synchronized void configureExternal(String folder) {
        Assert.notNull(folder, "'folder' must not be null");
        if (!externalStaticResourcesSet) {
            try {
                ExternalResource resource = new ExternalResource(folder);
                if (!resource.getFile().isDirectory()) {
                    LOG.error("External Static resource location must be a folder");
                    // NOTE(review): returning here leaves
                    // externalStaticResourcesSet false (so a later call may
                    // retry), whereas an IOException below DOES set the flag.
                    // Confirm this asymmetry is intended.
                    return;
                }
                if (staticResourceHandlers == null) {
                    staticResourceHandlers = new ArrayList<>();
                }
                staticResourceHandlers.add(new ExternalResourceHandler(folder, "index.html"));
                LOG.info("External StaticResourceHandler configured with folder = " + folder);
            } catch (IOException e) {
                LOG.error("Error when creating external StaticResourceHandler", e);
            }
            externalStaticResourcesSet = true;
        }
    }
}
public class CRFModel {
    /**
     * Tags the given table using Viterbi decoding with backward path
     * recovery: forward pass computes the best score per (position, tag),
     * backward pass reconstructs the optimal tag sequence.
     *
     * @param table the token table; the predicted tag for each row is written
     *              into its last column via setLast
     */
    public void tag(Table table) {
        int size = table.size();
        if (size == 0) return;
        int tagSize = id2tag.length;
        // net[i][tag] = feature score of assigning `tag` at position i
        double[][] net = new double[size][tagSize];
        for (int i = 0; i < size; ++i) {
            LinkedList<double[]> scoreList = computeScoreList(table, i);
            for (int tag = 0; tag < tagSize; ++tag) {
                net[i][tag] = computeScore(scoreList, tag);
            }
        }
        // Single-token sequence: no transitions, just pick the best tag.
        if (size == 1) {
            double maxScore = -1e10;
            int bestTag = 0;
            for (int tag = 0; tag < net[0].length; ++tag) {
                if (net[0][tag] > maxScore) {
                    maxScore = net[0][tag];
                    bestTag = tag;
                }
            }
            table.setLast(0, id2tag[bestTag]);
            return;
        }
        // from[i][tag] = best predecessor tag when position i carries `tag`
        int[][] from = new int[size][tagSize];
        // Rolling (two-row) array over positions to bound memory.
        double[][] maxScoreAt = new double[2][tagSize];
        // Initially preI = 0, maxScoreAt[preI][pre] = net[0][pre].
        System.arraycopy(net[0], 0, maxScoreAt[0], 0, tagSize);
        int curI = 0;
        for (int i = 1; i < size; ++i) {
            curI = i & 1;
            int preI = 1 - curI;
            for (int now = 0; now < tagSize; ++now) {
                double maxScore = -1e10;
                for (int pre = 0; pre < tagSize; ++pre) {
                    // previous best + transition matrix[pre][now] + emission
                    double score = maxScoreAt[preI][pre] + matrix[pre][now] + net[i][now];
                    if (score > maxScore) {
                        maxScore = score;
                        from[i][now] = pre;
                        maxScoreAt[curI][now] = maxScore;
                    }
                }
                net[i][now] = maxScore;
            }
        }
        // Backtrack the best path starting from the final position.
        double maxScore = -1e10;
        int maxTag = 0;
        for (int tag = 0; tag < tagSize; ++tag) {
            if (maxScoreAt[curI][tag] > maxScore) {
                maxScore = maxScoreAt[curI][tag];
                maxTag = tag;
            }
        }
        table.setLast(size - 1, id2tag[maxTag]);
        maxTag = from[size - 1][maxTag];
        for (int i = size - 2; i > 0; --i) {
            table.setLast(i, id2tag[maxTag]);
            maxTag = from[i][maxTag];
        }
        table.setLast(0, id2tag[maxTag]);
    }
}
public class DocNumberCache {
    /**
     * Returns the cache entry for <code>uuid</code>, or <code>null</code> if
     * no entry exists for <code>uuid</code>.
     *
     * @param uuid the key.
     * @return cache entry or <code>null</code>.
     */
    Entry get(String uuid) {
        // The cache is split into segments selected by the first uuid
        // character to reduce lock contention across lookups.
        LRUMap cacheSegment = docNumbers[getSegmentIndex(uuid.charAt(0))];
        String key = uuid;
        Entry entry;
        synchronized (cacheSegment) {
            entry = (Entry) cacheSegment.get(key);
        }
        if (log.isDebugEnabled()) {
            // NOTE(review): these statistics fields are updated outside any
            // shared lock, so the counts are approximate under concurrency.
            accesses++;
            if (entry == null) {
                misses++;
            }
            // log at most after 1000 accesses and every 10 seconds
            if (accesses > 1000 && System.currentTimeMillis() - lastLog > LOG_INTERVAL) {
                long ratio = 100;
                if (misses != 0) {
                    ratio -= misses * 100L / accesses;
                }
                StringBuilder statistics = new StringBuilder();
                int inUse = 0;
                for (int i = 0; i < docNumbers.length; i++) {
                    inUse += docNumbers[i].size();
                }
                statistics.append("size=").append(inUse);
                statistics.append("/").append(docNumbers[0].maxSize() * CACHE_SEGMENTS);
                statistics.append(", #accesses=").append(accesses);
                statistics.append(", #hits=").append((accesses - misses));
                statistics.append(", #misses=").append(misses);
                statistics.append(", cacheRatio=").append(ratio).append("%");
                log.debug(statistics.toString());
                accesses = 0;
                misses = 0;
                lastLog = System.currentTimeMillis();
            }
        }
        return entry;
    }
}
public class Graph {
    /**
     * Runs a ScatterGather iteration on the graph. No configuration options
     * are provided (delegates with a <code>null</code> configuration).
     *
     * @param scatterFunction the scatter function
     * @param gatherFunction the gather function
     * @param maximumNumberOfIterations maximum number of iterations to perform
     * @return the updated Graph after the scatter-gather iteration has
     *         converged or after maximumNumberOfIterations.
     */
    public <M> Graph<K, VV, EV> runScatterGatherIteration(ScatterFunction<K, VV, M, EV> scatterFunction,
            org.apache.flink.graph.spargel.GatherFunction<K, VV, M> gatherFunction,
            int maximumNumberOfIterations) {
        return this.runScatterGatherIteration(scatterFunction, gatherFunction, maximumNumberOfIterations, null);
    }
}
public class WindowFunctionExpression { /** * Functions to find subexpressions by class . We need to search the * partition by and order by lists . */ @ Override public < aeClass > List < aeClass > findAllSubexpressionsOfClass ( Class < ? extends AbstractExpression > aeClass ) { } }
List < aeClass > list = super . findAllSubexpressionsOfClass ( aeClass ) ; for ( AbstractExpression pbexpr : m_partitionByExpressions ) { list . addAll ( pbexpr . findAllSubexpressionsOfClass ( aeClass ) ) ; } for ( AbstractExpression sortExpr : m_orderByExpressions ) { list . addAll ( sortExpr . findAllSubexpressionsOfClass ( aeClass ) ) ; } for ( AbstractExpression aggExpr : m_args ) { list . addAll ( aggExpr . findAllSubexpressionsOfClass ( aeClass ) ) ; } return list ;
public class PartnerUserService {
    /**
     * Deletes the user with the given id.
     *
     * @param partnerId The id of the partner the user belongs to
     * @param accountId The id of the account for the user
     * @param userId The id of the user to delete
     * @return This object, for call chaining
     */
    public PartnerUserService delete(long partnerId, long accountId, long userId) {
        // Issue a DELETE against the v2 partner/account/user resource.
        HTTP.DELETE(String.format("/v2/partners/%d/accounts/%d/users/%d", partnerId, accountId, userId));
        return this;
    }
}
public class DateSpinner {
    /**
     * {@inheritDoc}
     */
    @Override
    public void removeAdapterItemAt(int index) {
        // If the item being removed is currently selected, keep showing its
        // date via a temporary item so the visible selection survives the
        // removal.
        if (index == getSelectedItemPosition()) {
            Calendar date = getSelectedDate();
            selectTemporary(new DateItem(formatDate(date), date, NO_ID));
        }
        super.removeAdapterItemAt(index);
    }
}
public class TAEnabledRandomAccessFile {
    /**
     * Closes the file and the FileChannel, releasing the file lock first.
     *
     * @throws java.io.IOException if closing fails
     */
    public void close() throws IOException {
        if (raf != null) {
            // Release the lock on the file before closing it.
            try {
                if (this.fileLock != null) {
                    if (!fileLock.release()) {
                        LOG.error("Filelock not release properly");
                    }
                } else {
                    LOG.error("No Filelock set");
                }
            } finally {
                // Close the data file even when releasing the lock failed.
                this.fileLock = null;
                raf.close();
                raf = null;
            }
        }
    }
}
public class JKDateTimeUtil {
    /**
     * Gets the difference in whole minutes between two times of day.
     *
     * @param timeFrom the time from
     * @param timeTo the time to
     * @return minutes from {@code timeFrom} to {@code timeTo}; negative when
     *         {@code timeTo} is the earlier time of day, truncated toward zero
     */
    public static long getDifference(Time timeFrom, Time timeTo) {
        // Parse the "HH:mm:ss" string forms so only the time-of-day part is
        // compared, exactly as the original SimpleDateFormat version did, but
        // using the thread-safe java.time API instead of the non-thread-safe
        // (and legacy) SimpleDateFormat. A malformed value now throws
        // DateTimeParseException, which is still a RuntimeException.
        java.time.LocalTime from = java.time.LocalTime.parse(timeFrom.toString());
        java.time.LocalTime to = java.time.LocalTime.parse(timeTo.toString());
        return java.time.Duration.between(from, to).toMinutes();
    }
}
public class JmsBytesMessageImpl {
    /**
     * Checks to see if the producer has promised not to modify the payload
     * after it's been set. If they have, the requested JMS method would break
     * that promise, so a JMS exception is thrown built from the parameters.
     *
     * @param jmsMethod the name of the JMS method the caller attempted
     * @param ffdcProbeID the FFDC probe id to attach to the exception
     * @throws JMSException if the producer promise would be broken
     */
    private void checkProducerPromise(String jmsMethod, String ffdcProbeID) throws JMSException {
        if (TraceComponent.isAnyTracingEnabled() && tc.isEntryEnabled())
            SibTr.entry(this, tc, "checkProducerPromise", new Object[] { jmsMethod, ffdcProbeID });
        // Only fail if the producer promised not to modify the payload after
        // setting it.
        if (producerWontModifyPayloadAfterSet) {
            throw (JMSException) JmsErrorUtils.newThrowable(
                IllegalStateException.class, // JMS illegal state exception
                "PROMISE_BROKEN_EXCEPTION_CWSIA0510", // promise broken
                new Object[] { jmsMethod }, // insert = jms method name
                null, // no cause, original exception
                ffdcProbeID, // probe ID
                this, // caller
                tc); // trace component
        }
        if (TraceComponent.isAnyTracingEnabled() && tc.isEntryEnabled())
            SibTr.exit(this, tc, "checkProducerPromise");
    }
}
public class ArrayHelper { /** * Check if the passed object is an array or not . * @ param aObject * The object to be checked . May be < code > null < / code > . * @ return < code > true < / code > if the passed object is not < code > null < / code > and * represents an array . */ public static boolean isArray ( @ Nullable final Object aObject ) { } }
return aObject != null && ClassHelper . isArrayClass ( aObject . getClass ( ) ) ;
public class Bond { /** * Returns the geometric 3D center of the bond . * @ return The geometric 3D center of the bond */ @ Override public Point3d get3DCenter ( ) { } }
double xOfCenter = 0 ; double yOfCenter = 0 ; double zOfCenter = 0 ; for ( IAtom atom : atoms ) { xOfCenter += atom . getPoint3d ( ) . x ; yOfCenter += atom . getPoint3d ( ) . y ; zOfCenter += atom . getPoint3d ( ) . z ; } return new Point3d ( xOfCenter / getAtomCount ( ) , yOfCenter / getAtomCount ( ) , zOfCenter / getAtomCount ( ) ) ;
public class Hasher { /** * Hashes system environment . * http : / / docs . oracle . com / javase / tutorial / essential / environment / sysprop . html */ public String hashSystemEnv ( ) { } }
List < String > system = new ArrayList < String > ( ) ; for ( Entry < Object , Object > el : System . getProperties ( ) . entrySet ( ) ) { system . add ( el . getKey ( ) + " " + el . getValue ( ) ) ; } Map < String , String > env = System . getenv ( ) ; for ( Entry < String , String > el : env . entrySet ( ) ) { system . add ( el . getKey ( ) + " " + el . getValue ( ) ) ; } Collections . sort ( system ) ; ByteArrayOutputStream baos = new ByteArrayOutputStream ( ) ; try { DataOutputStream dos = new DataOutputStream ( baos ) ; for ( String s : system ) { dos . write ( s . getBytes ( ) ) ; } } catch ( IOException ex ) { // never } return hashByteArray ( baos . toByteArray ( ) ) ;
public class SubWriterHolderWriter {
    /**
     * Add the summary link for the member, using the first sentence of the
     * member's documentation as the link comment.
     *
     * @param mw the writer for the member being documented
     * @param member the member to be documented
     * @param contentTree the content tree to which the link will be added
     */
    public void addSummaryLinkComment(AbstractMemberWriter mw, Element member, Content contentTree) {
        List<? extends DocTree> tags = utils.getFirstSentenceTrees(member);
        addSummaryLinkComment(mw, member, tags, contentTree);
    }
}