signature
stringlengths
43
39.1k
implementation
stringlengths
0
450k
public class ClassContext {

    /**
     * Get a BitSet representing the bytecodes that are used in the given
     * method. This is useful for prescreening a method for the existence of
     * particular instructions. Because this step doesn't require building a
     * MethodGen, it is very fast and memory-efficient, and may allow a
     * Detector to avoid some very expensive analysis, which is a Big Win for
     * the user.
     *
     * @param clazz  the class containing the method
     * @param method the method to scan
     * @return the BitSet containing the opcodes which appear in the method,
     *         or null if the method has no code
     */
    @CheckForNull
    static public BitSet getBytecodeSet(JavaClass clazz, Method method) {
        XMethod xmethod = XFactory.createXMethod(clazz, method);
        // containsKey + get (instead of a single get) because a cached value
        // may legitimately be null (see below).
        if (cachedBitsets().containsKey(xmethod)) {
            return cachedBitsets().get(xmethod);
        }
        Code code = method.getCode();
        if (code == null) {
            return null;
        }
        byte[] instructionList = code.getCode();
        // Create callback that records which opcodes occur.
        UnpackedBytecodeCallback callback = new UnpackedBytecodeCallback(instructionList.length);
        // Scan the method's raw bytecode.
        BytecodeScanner scanner = new BytecodeScanner();
        scanner.scan(instructionList, callback);
        UnpackedCode unpackedCode = callback.getUnpackedCode();
        BitSet result = null;
        if (unpackedCode != null) {
            result = unpackedCode.getBytecodeSet();
        }
        // Cache the result (possibly null when unpacking failed).
        cachedBitsets().put(xmethod, result);
        return result;
    }
}
public class DateUtils {

    /**
     * Gets a Date fragment for any unit.
     *
     * @param date     the date to work with, not null
     * @param fragment the Calendar field part of date to calculate
     * @param unit     the time unit
     * @return number of units within the fragment of the date
     * @throws IllegalArgumentException if the date is {@code null} or
     *         fragment is not supported
     * @since 2.4
     */
    @GwtIncompatible("incompatible method")
    private static long getFragment(final Date date, final int fragment, final TimeUnit unit) {
        validateDateNotNull(date);
        // Convert to a Calendar and delegate to the Calendar-based overload.
        final Calendar calendar = Calendar.getInstance();
        calendar.setTime(date);
        return getFragment(calendar, fragment, unit);
    }
}
public class DTDSubsetImpl {

    /**
     * Convenience method used by other classes: reports the redefinition of a
     * notation by throwing a parsing exception. The exception is located at
     * the new declaration and its message names the location of the original
     * declaration.
     *
     * @param oldDecl the previously seen notation declaration
     * @param newDecl the conflicting redefinition
     * @throws XMLStreamException always
     */
    public static void throwNotationException(NotationDeclaration oldDecl, NotationDeclaration newDecl) throws XMLStreamException {
        throw new WstxParsingException(
                MessageFormat.format(ErrorConsts.ERR_DTD_NOTATION_REDEFD,
                        new Object[] { newDecl.getName(), oldDecl.getLocation().toString() }),
                newDecl.getLocation());
    }
}
public class PortletFilterChainProxy { /** * Returns the first filter chain matching the supplied URL . * @ param request the request to match * @ return an ordered array of Filters defining the filter chain */ private List < PortletFilter > getFilters ( PortletRequest request ) { } }
for ( PortletSecurityFilterChain chain : filterChains ) { if ( chain . matches ( request ) ) { return chain . getFilters ( ) ; } } return null ;
public class ChannelRecoveryServiceRestAdapter {

    /**
     * Query to recover a channel that previously rejected messages or was
     * unreachable.<br>
     * The interface has been introduced for controlled bounce proxies.
     *
     * @param ccid                 identifier of the channel to recover
     * @param bpId                 identifier of the bounce proxy handling this channel
     * @param statusParam          the channel status
     * @param atmosphereTrackingId the atmosphere tracking id
     * @return response builder object: empty response (a) if the channel
     *         recovered on the previously assigned bounce proxy or (b) the
     *         bounce proxy is reachable by the controller but possibly not by
     *         cluster controllers; OK status (c) if the channel was moved to
     *         another bounce proxy; created resource (d) if the bounce proxy
     *         controller lost its data
     */
    @PUT
    @Path("/{ccid: [A-Z,a-z,0-9,_,\\-,\\.]+}")
    public Response recoverChannel(@PathParam("ccid") String ccid,
                                   @QueryParam("bp") String bpId,
                                   @QueryParam("status") ChannelStatusParam statusParam,
                                   @HeaderParam(ChannelServiceConstants.X_ATMOSPHERE_TRACKING_ID) String atmosphereTrackingId) {
        if (ccid == null || ccid.isEmpty())
            throw new JoynrHttpException(Status.BAD_REQUEST, JOYNRMESSAGINGERROR_CHANNELNOTSET);
        try {
            Channel channel = channelService.getChannel(ccid);
            if (channel == null) {
                // Case (d): bounce proxy controller lost data; recreate the channel.
                channel = channelService.createChannel(ccid, atmosphereTrackingId);
                return Response.created(channel.getLocation())
                               .header("bp", channel.getBounceProxy().getId())
                               .build();
            } else {
                // NOTE(review): bpId.equals(...) throws NPE if the "bp" query
                // parameter is missing — TODO confirm callers always send it.
                if (!bpId.equals(channel.getBounceProxy().getId())) {
                    // Case (c): channel was moved to another bounce proxy.
                    return Response.ok()
                                   .header("Location", channel.getLocation().toString())
                                   .header("bp", channel.getBounceProxy().getId())
                                   .build();
                } else {
                    // Bounce proxy exists, but was unreachable for cluster
                    // controllers.
                    if (statusParam == null) {
                        // We need a status to decide how to proceed.
                        throw new JoynrHttpException(Status.BAD_REQUEST.getStatusCode(), 0, "No channel status set");
                    } else {
                        ChannelStatus status = statusParam.getStatus();
                        if (status.equals(ChannelStatus.REJECTING_LONG_POLLS)) {
                            // Case (a): recover the channel on the previously
                            // assigned bounce proxy.
                            channelService.recoverChannel(ccid, atmosphereTrackingId);
                            return Response.noContent().build();
                        } else if (status.equals(ChannelStatus.UNREACHABLE)) {
                            if (channelService.isBounceProxyForChannelResponding(ccid)) {
                                // Case (b): bounce proxy reachable by bounce
                                // proxy controller, but possibly not by
                                // cluster controllers.
                                errorNotifier.alertBounceProxyUnreachable(ccid,
                                                                          bpId,
                                                                          request.getRemoteAddr(),
                                                                          "Bounce Proxy unreachable for Cluster Controller");
                                return Response.noContent().build();
                            } else {
                                // Bounce proxy dead.
                                errorNotifier.alertBounceProxyUnreachable(ccid,
                                                                          bpId,
                                                                          request.getLocalAddr(),
                                                                          "Bounce Proxy unreachable for Channel Service");
                                // TODO
                                // channelServiceDelegate.markBounceProxyAsUnreachable(bpId);
                                // Create new channel on different bounce proxy.
                                Channel newChannel = channelService.createChannel(ccid, atmosphereTrackingId);
                                return Response.created(newChannel.getLocation())
                                               .header("bp", newChannel.getBounceProxy().getId())
                                               .build();
                            }
                        } else {
                            throw new JoynrHttpException(Status.BAD_REQUEST.getStatusCode(), 0, "Unknown channel status '" + status + "'");
                        }
                    }
                }
            }
        } catch (Exception e) {
            // NOTE(review): this also wraps the JoynrHttpExceptions thrown
            // above into a 500 — confirm that is the intended behavior.
            throw new WebApplicationException(e, Status.INTERNAL_SERVER_ERROR);
        }
    }
}
public class TokenQueue {

    /**
     * Finds the next unquoted '/' (XPath node separator), taking escape
     * sequences and quoted string literals into account. Returns -1 when no
     * separator is found. Note the returned value is the index one PAST the
     * separator, since {@code start} has already been incremented when the
     * match is detected.
     */
    private int nextXpathNodeSeperator() {
        int start = pos;
        boolean inQuote = false;
        char last = 0;
        do {
            if (queue.length() - start == 0) {
                // Reached the end of the queue without finding a separator.
                return -1;
            }
            Character c = queue.charAt(start++);
            if (last == 0 || last != ESC) {
                // Only honor quote characters that are not escaped.
                if ((c.equals('\'') || c.equals('"')))
                    inQuote = !inQuote;
                if (inQuote)
                    // NOTE(review): 'last' is not updated while skipping
                    // quoted characters — TODO confirm this is intentional
                    // for escape handling inside quotes.
                    continue;
            }
            if (c == '/') {
                return start;
            }
            last = c;
        } while (true);
    }
}
public class PolylineSplitMerge { /** * Returns the previous corner in the list */ Element < Corner > previous ( Element < Corner > e ) { } }
if ( e . previous == null ) { return list . getTail ( ) ; } else { return e . previous ; }
public class Resource { /** * Subtracts a given resource from the current resource . The results is never negative . */ public Resource subtractAbsolute ( Resource other ) { } }
double cpuDifference = this . getCpu ( ) - other . getCpu ( ) ; double extraCpu = Math . max ( 0 , cpuDifference ) ; ByteAmount ramDifference = this . getRam ( ) . minus ( other . getRam ( ) ) ; ByteAmount extraRam = ByteAmount . ZERO . max ( ramDifference ) ; ByteAmount diskDifference = this . getDisk ( ) . minus ( other . getDisk ( ) ) ; ByteAmount extraDisk = ByteAmount . ZERO . max ( diskDifference ) ; return new Resource ( extraCpu , extraRam , extraDisk ) ;
public class DBLogger {

    /**
     * Creates a fatal data-store exception. These always result in the
     * session being closed!
     *
     * @param msg the error message
     * @param t   the Throwable to report as the cause
     * @return fatal data store exception
     */
    public static RuntimeException newFatalDataStore(String msg, Throwable t) {
        // Delegate to the generic factory with the fatal-data-store kind.
        return newEx(FATAL_DATA_STORE_EXCEPTION, msg, t);
    }
}
public class ObjectCacheUnitImpl {

    /**
     * Creates a DistributedObjectCache from a cacheConfig object.
     * Entry preconditions (only call this method once per cache name):
     * config != null, config.distributedObjectCache == null.
     *
     * @param config the cache configuration
     * @return a DistributedNioMap when NIO support is enabled, otherwise a
     *         DistributedMap implementation (both are DistributedObjectCache)
     */
    private DistributedObjectCache createDistributedObjectCache(CacheConfig config) {
        final String methodName = "createDistributedObjectCache()";
        // NOTE(review): the null check below only guards the trace message;
        // the code after it dereferences config unconditionally, relying on
        // the documented precondition that config is non-null.
        if (tc.isEntryEnabled())
            Tr.entry(tc, methodName + " cacheName=" + (config != null ? config.getCacheName() : "null"));
        // Create the underlying cache and link it to the config.
        DCache dCache = ServerCache.createCache(config.getCacheName(), config);
        config.setCache(dCache);
        DistributedObjectCache distributedObjectCache = null;
        if (config.isEnableNioSupport()) {
            distributedObjectCache = new DistributedNioMapImpl(dCache);
        } else {
            distributedObjectCache = new DistributedMapImpl(dCache);
        }
        if (tc.isEntryEnabled())
            Tr.exit(tc, methodName + " distributedObjectCache=" + distributedObjectCache);
        return distributedObjectCache;
    }
}
public class HomographyTotalLeastSquares {

    /**
     * Constructs the linear system (matrix A) for elements 6 to 8 of the
     * homography H. The top half of A is filled from the x-components of X2
     * and the bottom half from the y-components.
     */
    void constructA678() {
        final int N = X1.numRows;

        // Pseudo-inverse of hat(P).
        computePseudo(X1, P_plus);

        DMatrixRMaj PPpXP = new DMatrixRMaj(1, 1);
        DMatrixRMaj PPpYP = new DMatrixRMaj(1, 1);
        computePPXP(X1, P_plus, X2, 0, PPpXP);
        computePPXP(X1, P_plus, X2, 1, PPpYP);

        DMatrixRMaj PPpX = new DMatrixRMaj(1, 1);
        DMatrixRMaj PPpY = new DMatrixRMaj(1, 1);
        computePPpX(X1, P_plus, X2, 0, PPpX);
        computePPpX(X1, P_plus, X2, 1, PPpY);

        // ===== Equation 20
        computeEq20(X2, X1, XP_bar);

        // ===== Equation 21
        // A has 2N rows of 3 columns: N from the x-equations, N from the
        // y-equations.
        A.reshape(N * 2, 3);

        double XP_bar_x = XP_bar[0];
        double XP_bar_y = XP_bar[1];
        double YP_bar_x = XP_bar[2];
        double YP_bar_y = XP_bar[3];

        // Compute the top half of A.
        for (int i = 0, index = 0, indexA = 0; i < N; i++, index += 2) {
            double x = -X2.data[i * 2];
            double P_hat_x = X1.data[index];     // hat{P}[0]
            double P_hat_y = X1.data[index + 1]; // hat{P}[1]

            // x' * hat{p} - bar{X*P} - PPpXP
            A.data[indexA++] = x * P_hat_x - XP_bar_x - PPpXP.data[index];
            A.data[indexA++] = x * P_hat_y - XP_bar_y - PPpXP.data[index + 1];
            // X' * 1 - PPx1
            A.data[indexA++] = x - PPpX.data[i];
        }

        // Compute the bottom half of A (starts at row N, i.e. offset N*3).
        for (int i = 0, index = 0, indexA = N * 3; i < N; i++, index += 2) {
            double x = -X2.data[i * 2 + 1];
            double P_hat_x = X1.data[index];
            double P_hat_y = X1.data[index + 1];

            // x' * hat{p} - bar{X*P} - PPpXP
            A.data[indexA++] = x * P_hat_x - YP_bar_x - PPpYP.data[index];
            A.data[indexA++] = x * P_hat_y - YP_bar_y - PPpYP.data[index + 1];
            // X' * 1 - PPx1
            A.data[indexA++] = x - PPpY.data[i];
        }
    }
}
public class DaoTemplate {

    /**
     * Queries records with an SQL statement or fragment. The SQL may be
     * incomplete, e.g. only the condition part (such as a WHERE clause); in
     * that case "SELECT * FROM [tableName] " is prepended automatically, so
     * the caller need not know the table name and can supply the condition
     * alone.
     *
     * @param sql    the SQL statement or fragment
     * @param params parameters matching the SQL placeholders
     * @return the matching records
     * @throws SQLException on SQL execution failure
     */
    public List<Entity> findBySql(String sql, Object... params) throws SQLException {
        // Peek at the first 6 characters to see whether the statement
        // already starts with SELECT.
        String selectKeyword = StrUtil.subPre(sql.trim(), 6).toLowerCase();
        if (false == "select".equals(selectKeyword)) {
            sql = "SELECT * FROM " + this.tableName + " " + sql;
        }
        return db.query(sql, new EntityListHandler(), params);
    }
}
public class AbstractConsumerQuery {

    /*
     * (non-Javadoc)
     * @see net.timewalker.ffmq4.network.packet.AbstractPacket#serializeTo(net.timewalker.ffmq4.utils.RawDataOutputStream)
     */
    @Override
    protected void serializeTo(RawDataBuffer out) {
        // Serialize the common packet fields first, then this query's
        // consumer id.
        super.serializeTo(out);
        out.writeInt(consumerId.asInt());
    }
}
public class HashFunctions { /** * Fowler - Noll - Vo 32 bit hash ( FNV - 1a ) for long key . This is big - endian version ( native endianess of JVM ) . < br / > < p / > * < h3 > Algorithm < / h3 > < p / > * < pre > * hash = offset _ basis * for each octet _ of _ data to be hashed * hash = hash xor octet _ of _ data * hash = hash * FNV _ prime * return hash < / pre > * < h3 > Links < / h3 > < a href = " http : / / www . isthe . com / chongo / tech / comp / fnv / " > http : / / www . isthe . com / chongo / tech / comp / fnv / < / a > < br / > * < a href = " http : / / en . wikipedia . org / wiki / Fowler % E2%80%93Noll % E2%80%93Vo _ hash _ function " > http : / / en . wikipedia . org / wiki / Fowler % E2%80%93Noll % E2%80%93Vo _ hash _ function < / a > < br / > * @ param c long key to be hashed * @ return hash 32 bit hash */ public static int FVN64to32hash ( long c ) { } }
long hash = FNV_BASIS ; hash ^= c >>> 56 ; hash *= FNV_PRIME_32 ; hash ^= 0xFFL & ( c >>> 48 ) ; hash *= FNV_PRIME_32 ; hash ^= 0xFFL & ( c >>> 40 ) ; hash *= FNV_PRIME_32 ; hash ^= 0xFFL & ( c >>> 32 ) ; hash *= FNV_PRIME_32 ; hash ^= 0xFFL & ( c >>> 24 ) ; hash *= FNV_PRIME_32 ; hash ^= 0xFFL & ( c >>> 16 ) ; hash *= FNV_PRIME_32 ; hash ^= 0xFFL & ( c >>> 8 ) ; hash *= FNV_PRIME_32 ; hash ^= 0xFFL & c ; hash *= FNV_PRIME_32 ; return ( int ) hash ;
public class ArrayBasedStrategy {

    /**
     * Removes ALL endpoints of the specified node.
     *
     * @param nodeID id of the node whose endpoints are removed
     * @return true if at least one endpoint was removed
     */
    @Override
    public boolean remove(String nodeID) {
        Endpoint endpoint;
        boolean found = false;
        for (int i = 0; i < endpoints.length; i++) {
            endpoint = endpoints[i];
            if (nodeID.equals(endpoint.getNodeID())) {
                found = true;
                if (endpoints.length == 1) {
                    endpoints = new Endpoint[0];
                } else {
                    // Rebuild the array without element i, then step i back
                    // so the element shifted into slot i is examined too.
                    Endpoint[] copy = new Endpoint[endpoints.length - 1];
                    System.arraycopy(endpoints, 0, copy, 0, i);
                    System.arraycopy(endpoints, i + 1, copy, i, endpoints.length - i - 1);
                    endpoints = copy;
                    i--;
                }
            }
        }
        // Drop the node from the lookup cache once its endpoints are gone.
        if (found) {
            endpointCache.remove(nodeID);
        }
        return found;
    }
}
public class MediaApi { /** * Complete an interaction * Marks the specified interaction as complete . * @ param mediatype The media channel . ( required ) * @ param id The ID of the interaction to complete . ( required ) * @ param completeData Request parameters . ( optional ) * @ return ApiSuccessResponse * @ throws ApiException If fail to call the API , e . g . server error or cannot deserialize the response body */ public ApiSuccessResponse complete ( String mediatype , String id , CompleteData completeData ) throws ApiException { } }
ApiResponse < ApiSuccessResponse > resp = completeWithHttpInfo ( mediatype , id , completeData ) ; return resp . getData ( ) ;
public class CDDImpl {

    /**
     * Sets the xocUnits attribute and, when notifications are required,
     * broadcasts a SET notification carrying the old and new values.
     * <!-- begin-user-doc -->
     * <!-- end-user-doc -->
     * @generated
     */
    public void setXocUnits(Integer newXocUnits) {
        Integer oldXocUnits = xocUnits;
        xocUnits = newXocUnits;
        if (eNotificationRequired())
            eNotify(new ENotificationImpl(this, Notification.SET, AfplibPackage.CDD__XOC_UNITS, oldXocUnits, xocUnits));
    }
}
public class GeneralizedCounter {

    /**
     * For testing purposes only: exercises the GeneralizedCounter API and
     * prints the results to stdout.
     */
    public static void main(String[] args) {
        // Object[].equals is identity-based, so this prints false even though
        // the two arrays have equal contents.
        Object[] a1 = new Object[] { "a", "b" };
        Object[] a2 = new Object[] { "a", "b" };
        System.out.println(a1.equals(a2));

        // Build a depth-3 counter and add a few keyed counts.
        GeneralizedCounter<String> gc = new GeneralizedCounter<String>(3);
        gc.incrementCount(Arrays.asList(new String[] { "a", "j", "x" }), 3.0);
        gc.incrementCount(Arrays.asList(new String[] { "a", "l", "x" }), 3.0);
        gc.incrementCount(Arrays.asList(new String[] { "b", "k", "y" }), 3.0);
        gc.incrementCount(Arrays.asList(new String[] { "b", "k", "z" }), 3.0);
        System.out.println("incremented counts.");
        System.out.println(gc.dumpKeys());
        System.out.println("string representation of generalized counter:");
        System.out.println(gc.toString());
        gc.printKeySet();
        System.out.println("entry set:\n" + gc.entrySet());

        // Query counts for present and absent keys.
        arrayPrintDouble(gc.getCounts(Arrays.asList(new String[] { "a", "j", "x" })));
        arrayPrintDouble(gc.getCounts(Arrays.asList(new String[] { "a", "j", "z" })));
        arrayPrintDouble(gc.getCounts(Arrays.asList(new String[] { "b", "k", "w" })));
        arrayPrintDouble(gc.getCounts(Arrays.asList(new String[] { "b", "k", "z" })));

        // Conditional views; increments through a view are visible in gc.
        GeneralizedCounter<String> gc1 = gc.conditionalize(Arrays.asList(new String[] { "a" }));
        gc1.incrementCount(Arrays.asList(new String[] { "j", "x" }));
        gc1.incrementCount2D("j", "z");
        GeneralizedCounter<String> gc2 = gc1.conditionalize(Arrays.asList(new String[] { "j" }));
        gc2.incrementCount1D("x");
        System.out.println("Pretty-printing gc after incrementing gc1:");
        gc.prettyPrint();
        System.out.println("Total: " + gc.totalCount());
        gc1.printKeySet();
        System.out.println("another entry set:\n" + gc1.entrySet());

        // Counter views expose the counts as a flat ClassicCounter.
        ClassicCounter<List<String>> c = gc.counterView();
        System.out.println("string representation of counter view:");
        System.out.println(c.toString());
        double d1 = c.getCount(Arrays.asList(new String[] { "a", "j", "x" }));
        double d2 = c.getCount(Arrays.asList(new String[] { "a", "j", "w" }));
        System.out.println(d1 + " " + d2);
        ClassicCounter<List<String>> c1 = gc1.counterView();
        System.out.println("Count of {j,x} -- should be 3.0\t" + c1.getCount(Arrays.asList(new String[] { "j", "x" })));
        System.out.println(c.keySet() + " size " + c.keySet().size());
        System.out.println(c1.keySet() + " size " + c1.keySet().size());
        System.out.println(c1.equals(c));
        System.out.println(c.equals(c1));
        System.out.println(c.equals(c));

        System.out.println("### testing equality of regular Counter...");
        ClassicCounter<String> z1 = new ClassicCounter<String>();
        ClassicCounter<String> z2 = new ClassicCounter<String>();
        z1.incrementCount("a1");
        z1.incrementCount("a2");
        z2.incrementCount("b");
        System.out.println(z1.equals(z2));
        System.out.println(z1.toString());
        System.out.println(z1.keySet().toString());
    }
}
public class AmazonSnowballClient { /** * Creates an address for a Snowball to be shipped to . In most regions , addresses are validated at the time of * creation . The address you provide must be located within the serviceable area of your region . If the address is * invalid or unsupported , then an exception is thrown . * @ param createAddressRequest * @ return Result of the CreateAddress operation returned by the service . * @ throws InvalidAddressException * The address provided was invalid . Check the address with your region ' s carrier , and try again . * @ throws UnsupportedAddressException * The address is either outside the serviceable area for your region , or an error occurred . Check the * address with your region ' s carrier and try again . If the issue persists , contact AWS Support . * @ sample AmazonSnowball . CreateAddress * @ see < a href = " http : / / docs . aws . amazon . com / goto / WebAPI / snowball - 2016-06-30 / CreateAddress " target = " _ top " > AWS API * Documentation < / a > */ @ Override public CreateAddressResult createAddress ( CreateAddressRequest request ) { } }
request = beforeClientExecution ( request ) ; return executeCreateAddress ( request ) ;
public class AmazonElasticLoadBalancingClient { /** * Replaces the set of policies associated with the specified port on which the EC2 instance is listening with a new * set of policies . At this time , only the back - end server authentication policy type can be applied to the instance * ports ; this policy type is composed of multiple public key policies . * Each time you use < code > SetLoadBalancerPoliciesForBackendServer < / code > to enable the policies , use the * < code > PolicyNames < / code > parameter to list the policies that you want to enable . * You can use < a > DescribeLoadBalancers < / a > or < a > DescribeLoadBalancerPolicies < / a > to verify that the policy is * associated with the EC2 instance . * For more information about enabling back - end instance authentication , see < a href = * " http : / / docs . aws . amazon . com / elasticloadbalancing / latest / classic / elb - create - https - ssl - load - balancer . html # configure _ backendauth _ clt " * > Configure Back - end Instance Authentication < / a > in the < i > Classic Load Balancers Guide < / i > . For more information * about Proxy Protocol , see < a * href = " http : / / docs . aws . amazon . com / elasticloadbalancing / latest / classic / enable - proxy - protocol . html " > Configure Proxy * Protocol Support < / a > in the < i > Classic Load Balancers Guide < / i > . * @ param setLoadBalancerPoliciesForBackendServerRequest * Contains the parameters for SetLoadBalancerPoliciesForBackendServer . * @ return Result of the SetLoadBalancerPoliciesForBackendServer operation returned by the service . * @ throws LoadBalancerNotFoundException * The specified load balancer does not exist . * @ throws PolicyNotFoundException * One or more of the specified policies do not exist . * @ throws InvalidConfigurationRequestException * The requested configuration change is not valid . * @ sample AmazonElasticLoadBalancing . 
SetLoadBalancerPoliciesForBackendServer * @ see < a * href = " http : / / docs . aws . amazon . com / goto / WebAPI / elasticloadbalancing - 2012-06-01 / SetLoadBalancerPoliciesForBackendServer " * target = " _ top " > AWS API Documentation < / a > */ @ Override public SetLoadBalancerPoliciesForBackendServerResult setLoadBalancerPoliciesForBackendServer ( SetLoadBalancerPoliciesForBackendServerRequest request ) { } }
request = beforeClientExecution ( request ) ; return executeSetLoadBalancerPoliciesForBackendServer ( request ) ;
public class ActionMenuView {

    /**
     * Measure a child view to fit within cell-based formatting. The child's
     * width will be measured to a whole multiple of cellSize.
     *
     * <p>Sets the expandable and cellsUsed fields of LayoutParams.
     *
     * @param child                   child to measure
     * @param cellSize                size of one cell
     * @param cellsRemaining          number of cells remaining that this view can expand to fill
     * @param parentHeightMeasureSpec MeasureSpec used by the parent view
     * @param parentHeightPadding     padding present in the parent view
     * @return number of cells this child was measured to occupy
     */
    static int measureChildForCells(View child, int cellSize, int cellsRemaining, int parentHeightMeasureSpec, int parentHeightPadding) {
        final LayoutParams lp = (LayoutParams) child.getLayoutParams();

        final int childHeightSize = MeasureSpec.getSize(parentHeightMeasureSpec) - parentHeightPadding;
        final int childHeightMode = MeasureSpec.getMode(parentHeightMeasureSpec);
        final int childHeightSpec = MeasureSpec.makeMeasureSpec(childHeightSize, childHeightMode);

        int cellsUsed = 0;
        if (cellsRemaining > 0) {
            // First pass: let the child take at most the available cells,
            // then round the measured width up to a whole number of cells.
            final int childWidthSpec = MeasureSpec.makeMeasureSpec(cellSize * cellsRemaining, MeasureSpec.AT_MOST);
            child.measure(childWidthSpec, childHeightSpec);

            final int measuredWidth = child.getMeasuredWidth();
            cellsUsed = measuredWidth / cellSize;
            if (measuredWidth % cellSize != 0) cellsUsed++;
        }

        // Only non-overflow action item views that show text are expandable.
        final ActionMenuItemView itemView = child instanceof ActionMenuItemView ? (ActionMenuItemView) child : null;
        final boolean expandable = !lp.isOverflowButton && itemView != null && itemView.hasText();
        lp.expandable = expandable;
        lp.cellsUsed = cellsUsed;

        // Second pass: re-measure at exactly the granted cell width.
        final int targetWidth = cellsUsed * cellSize;
        child.measure(MeasureSpec.makeMeasureSpec(targetWidth, MeasureSpec.EXACTLY), childHeightSpec);
        return cellsUsed;
    }
}
public class PerfCounterMap {

    /**
     * Gets a performance counter, creating it on demand.
     *
     * @param counter the name of the counter to retrieve (typically the
     *                procedure name). A new counter is created on the fly if
     *                none previously existed in the map.
     * @return the counter registered under the given name
     */
    public PerfCounter get(String counter) {
        // Fix for the original check-then-act race: containsKey followed by
        // an unconditional put could overwrite a counter that another thread
        // registered in between, silently discarding its tracked events.
        // putIfAbsent keeps whichever counter was registered first and we
        // return the value actually present in the map. (This is fully atomic
        // when Counters is a ConcurrentMap — TODO confirm its declared type —
        // and is never worse than the original for a plain Map.)
        PerfCounter existing = this.Counters.get(counter);
        if (existing != null) {
            return existing;
        }
        this.Counters.putIfAbsent(counter, new PerfCounter(false));
        return this.Counters.get(counter);
    }
}
public class U {

    /**
     * Documented, #has
     * Checks whether the map contains a mapping for the given key (a mapping
     * to {@code null} still counts as present).
     */
    public static <K, V> boolean has(final Map<K, V> object, final K key) {
        final boolean present = object.containsKey(key);
        return present;
    }
}
public class AipFace {

    /**
     * Face detection API.
     *
     * @param image     image payload (total size must be below 10M); how it
     *                  is interpreted depends on imageType
     * @param imageType one of:
     *                  BASE64 — urlencoded base64 image data, at most 2M
     *                  after encoding;
     *                  URL — the image's URL (download may be slow depending
     *                  on the network);
     *                  FACE_TOKEN — the unique id assigned to a face image by
     *                  a previous detect call (the same image always yields
     *                  the same FACE_TOKEN)
     * @param options   optional string-to-string parameters:
     *                  face_field — comma-separated subset of age, beauty,
     *                  expression, faceshape, gender, glasses, landmark,
     *                  race, quality, facetype; by default only face_token,
     *                  the face rectangle, probability and rotation are
     *                  returned;
     *                  max_face_num — maximum number of faces to process
     *                  (default 1, the largest face only; at most 10);
     *                  face_type — LIVE (default, everyday photos), IDCARD
     *                  (ID-card chip portrait), WATERMARK (watermarked ID
     *                  photo) or CERT (photographed certificate/ID document)
     * @return the JSON response
     */
    public JSONObject detect(String image, String imageType, HashMap<String, String> options) {
        AipRequest request = new AipRequest();
        // Common pre-processing (authentication etc.) before the body is set.
        preOperation(request);
        request.addBody("image", image);
        request.addBody("image_type", imageType);
        if (options != null) {
            request.addBody(options);
        }
        request.setUri(FaceConsts.DETECT);
        request.setBodyFormat(EBodyFormat.RAW_JSON);
        postOperation(request);
        return requestServer(request);
    }
}
public class Dates { /** * Returns an instance of < code > java . util . Calendar < / code > that is suitably * initialised for working with the specified date . * @ param date a date instance * @ return a < code > java . util . Calendar < / code > */ public static Calendar getCalendarInstance ( final Date date ) { } }
Calendar instance ; if ( date instanceof DateTime ) { final DateTime dateTime = ( DateTime ) date ; if ( dateTime . getTimeZone ( ) != null ) { instance = Calendar . getInstance ( dateTime . getTimeZone ( ) ) ; } else if ( dateTime . isUtc ( ) ) { instance = Calendar . getInstance ( TimeZones . getUtcTimeZone ( ) ) ; } else { // a date - time without a timezone but not UTC is floating instance = Calendar . getInstance ( ) ; } } else { instance = Calendar . getInstance ( TimeZones . getDateTimeZone ( ) ) ; } return instance ;
public class SipAnnotationDeploymentProcessor {

    /**
     * Processes SIP servlet annotations for the deployment unit and stores
     * the resulting per-resource-root metadata in a SipAnnotationMetaData
     * attachment.
     */
    @Override
    public void deploy(final DeploymentPhaseContext phaseContext) throws DeploymentUnitProcessingException {
        final DeploymentUnit deploymentUnit = phaseContext.getDeploymentUnit();
        // Commented for http://code.google.com/p/sipservlets/issues/detail?id=168
        // When no sip.xml but annotations only, Application is not recognized as SIP App by AS7
        // SipMetaData sipMetaData = deploymentUnit.getAttachment(SipMetaData.ATTACHMENT_KEY);
        // if (sipMetaData == null) {
        //     return;
        SipAnnotationMetaData sipAnnotationsMetaData = deploymentUnit.getAttachment(SipAnnotationMetaData.ATTACHMENT_KEY);
        if (sipAnnotationsMetaData == null) {
            // First processor to run: create and attach the container.
            sipAnnotationsMetaData = new SipAnnotationMetaData();
            deploymentUnit.putAttachment(SipAnnotationMetaData.ATTACHMENT_KEY, sipAnnotationsMetaData);
        }
        Map<ResourceRoot, Index> indexes = AnnotationIndexUtils.getAnnotationIndexes(deploymentUnit);
        // Process components.
        for (final Entry<ResourceRoot, Index> entry : indexes.entrySet()) {
            if (logger.isDebugEnabled())
                logger.debug("doDeploy(): processing annotations from " + entry.getKey().getRootName());
            final Index jarIndex = entry.getValue();
            SipMetaData sipMetaData = processAnnotations(sipAnnotationsMetaData, jarIndex);
            if (sipMetaData != null) {
                // https://github.com/Mobicents/sip-servlets/issues/68
                // Adding only the ones that actually have annotations to save
                // memory and computing time later on.
                sipAnnotationsMetaData.put(entry.getKey().getRootName(), sipMetaData);
            }
        }
    }
}
public class DataLoader {

    /**
     * Creates a new DataLoader with the specified batch loader function and
     * default options (batching, caching and unlimited batch size) where the
     * batch loader function returns a list of {@link org.dataloader.Try}
     * objects.
     * <p>
     * If it's important for you to know the exact status of each item in a
     * batch call, and whether it threw exceptions, then you can use this form
     * to create the data loader: a Try object captures either a returned
     * value or the exception raised while producing it.
     *
     * @param batchLoadFunction the batch load function to use that uses {@link org.dataloader.Try} objects
     * @param <K>               the key type
     * @param <V>               the value type
     * @return a new DataLoader
     */
    public static <K, V> DataLoader<K, V> newDataLoaderWithTry(BatchLoaderWithContext<K, Try<V>> batchLoadFunction) {
        // Delegate to the options-aware factory; null selects the defaults.
        return newDataLoaderWithTry(batchLoadFunction, null);
    }
}
public class AbstractInputHandler { /** * Merges all Maps into one single map . * All maps are merged according to next algorithm : * prefix < / class name stored in map > . < / map key > * Fields encodedQuery and addPrefix are not used and might be removed before final release * @ param encodedQuery Original SQL string * @ param mapList List of Maps which should be merged * @ param addPrefix Specifies if prefix should be added to the beginning * @ return Merged Map */ protected Map < String , Object > mergeMaps ( String encodedQuery , List < Map < String , Object > > mapList , boolean addPrefix ) { } }
Map < String , Object > mergedMap = new HashMap < String , Object > ( ) ; String className = null ; for ( Map < String , Object > map : mapList ) { className = InputUtils . getClassName ( map ) ; for ( String key : map . keySet ( ) ) { if ( InputUtils . isClassNameKey ( key ) == false ) { if ( className != null && addPrefix == true ) { mergedMap . put ( InputUtils . addClassName ( className . toLowerCase ( ) , key . toLowerCase ( ) ) , map . get ( key ) ) ; } else { mergedMap . put ( key . toLowerCase ( ) , map . get ( key ) ) ; } } } } return mergedMap ;
public class HttpRequest {

    /**
     * Executes the request.
     *
     * @param isAsync whether the response body is read asynchronously
     * @return the response
     */
    public HttpResponse execute(boolean isAsync) {
        // Append parameters to the URL for GET-style requests.
        urlWithParamIfGet();
        // Encode the URL when requested.
        if (this.encodeUrlParams) {
            this.url = HttpUtil.encodeParams(this.url, this.charset);
        }
        // Initialize the connection.
        initConnecton();
        // Send the request.
        send();
        // Follow redirects manually; returns null when no redirect happened.
        HttpResponse httpResponse = sendRedirectIfPosible();
        // No redirect: read the response from this connection.
        if (null == httpResponse) {
            httpResponse = new HttpResponse(this.httpConnection, this.charset, isAsync, isIgnoreResponseBody());
        }
        return httpResponse;
    }
}
public class MetricsImpl {
    /**
     * Retrieve metric data.
     * Gets metric values for multiple metrics.
     *
     * @param appId ID of the application. This is Application ID from the API Access settings blade in the Azure portal.
     * @param body The batched metrics query.
     * @throws IllegalArgumentException thrown if parameters fail the validation
     * @return the observable to the List&lt;MetricsResultsItem&gt; object
     */
    public Observable<ServiceResponse<List<MetricsResultsItem>>> getMultipleWithServiceResponseAsync(String appId, List<MetricsPostBodySchema> body) {
        // Validate required parameters up front so failures are synchronous.
        if (appId == null) {
            throw new IllegalArgumentException("Parameter appId is required and cannot be null.");
        }
        if (body == null) {
            throw new IllegalArgumentException("Parameter body is required and cannot be null.");
        }
        Validator.validate(body);
        // Issue the REST call and map the raw HTTP response into a typed ServiceResponse.
        return service.getMultiple(appId, body, this.client.acceptLanguage(), this.client.userAgent())
            .flatMap(new Func1<Response<ResponseBody>, Observable<ServiceResponse<List<MetricsResultsItem>>>>() {
                @Override
                public Observable<ServiceResponse<List<MetricsResultsItem>>> call(Response<ResponseBody> response) {
                    try {
                        // Deserialize the body; service errors surface here as exceptions.
                        ServiceResponse<List<MetricsResultsItem>> clientResponse = getMultipleDelegate(response);
                        return Observable.just(clientResponse);
                    } catch (Throwable t) {
                        // Propagate any deserialization/service failure through the observable.
                        return Observable.error(t);
                    }
                }
            });
    }
}
public class SystemStreamRuleImpl { /** * ( non - Javadoc ) * @ see * ch . powerunit . rules . TestListenerRule # onStart ( ch . powerunit . TestContext ) */ @ Override public void onStart ( TestContext < Object > context ) { } }
oldErr = System . err ; oldOut = System . out ; try { System . setErr ( remplacementErr ) ; System . setOut ( remplacementOut ) ; } catch ( SecurityException e ) { // ignored }
public class ImageZoomPanel { /** * Change the image being displayed . * @ param image The new image which will be displayed . */ public synchronized void setBufferedImage ( BufferedImage image ) { } }
// assume the image was initially set before the GUI was invoked if ( checkEventDispatch && this . img != null ) { if ( ! SwingUtilities . isEventDispatchThread ( ) ) throw new RuntimeException ( "Changed image when not in GUI thread?" ) ; } this . img = image ; if ( image != null ) updateSize ( image . getWidth ( ) , image . getHeight ( ) ) ;
public class BatchGetDeploymentGroupsRequest { /** * The names of the deployment groups . * < b > NOTE : < / b > This method appends the values to the existing list ( if any ) . Use * { @ link # setDeploymentGroupNames ( java . util . Collection ) } or { @ link # withDeploymentGroupNames ( java . util . Collection ) } * if you want to override the existing values . * @ param deploymentGroupNames * The names of the deployment groups . * @ return Returns a reference to this object so that method calls can be chained together . */ public BatchGetDeploymentGroupsRequest withDeploymentGroupNames ( String ... deploymentGroupNames ) { } }
if ( this . deploymentGroupNames == null ) { setDeploymentGroupNames ( new com . amazonaws . internal . SdkInternalList < String > ( deploymentGroupNames . length ) ) ; } for ( String ele : deploymentGroupNames ) { this . deploymentGroupNames . add ( ele ) ; } return this ;
public class Ifc2x3tc1PackageImpl {
    /**
     * Returns the EClass for IfcContextDependentMeasure, resolving it lazily from the
     * globally registered package on first access.
     * <!-- begin-user-doc -->
     * <!-- end-user-doc -->
     * @generated
     */
    public EClass getIfcContextDependentMeasure() {
        if (ifcContextDependentMeasureEClass == null) {
            // Classifier index 660 is fixed by the generated package model — do not change by hand.
            ifcContextDependentMeasureEClass = (EClass) EPackage.Registry.INSTANCE.getEPackage(Ifc2x3tc1Package.eNS_URI).getEClassifiers().get(660);
        }
        return ifcContextDependentMeasureEClass;
    }
}
public class Instance { /** * Allocates a simple slot on this TaskManager instance . This method returns { @ code null } , if no slot * is available at the moment . * @ return A simple slot that represents a task slot on this TaskManager instance , or null , if the * TaskManager instance has no more slots available . * @ throws InstanceDiedException Thrown if the instance is no longer alive by the time the * slot is allocated . */ public SimpleSlot allocateSimpleSlot ( ) throws InstanceDiedException { } }
synchronized ( instanceLock ) { if ( isDead ) { throw new InstanceDiedException ( this ) ; } Integer nextSlot = availableSlots . poll ( ) ; if ( nextSlot == null ) { return null ; } else { SimpleSlot slot = new SimpleSlot ( this , location , nextSlot , taskManagerGateway ) ; allocatedSlots . add ( slot ) ; return slot ; } }
public class host_cpu_core { /** * < pre > * Converts API response of bulk operation into object and returns the object array in case of get request . * < / pre > */ protected base_resource [ ] get_nitro_bulk_response ( nitro_service service , String response ) throws Exception { } }
host_cpu_core_responses result = ( host_cpu_core_responses ) service . get_payload_formatter ( ) . string_to_resource ( host_cpu_core_responses . class , response ) ; if ( result . errorcode != 0 ) { if ( result . errorcode == SESSION_NOT_EXISTS ) service . clear_session ( ) ; throw new nitro_exception ( result . message , result . errorcode , ( base_response [ ] ) result . host_cpu_core_response_array ) ; } host_cpu_core [ ] result_host_cpu_core = new host_cpu_core [ result . host_cpu_core_response_array . length ] ; for ( int i = 0 ; i < result . host_cpu_core_response_array . length ; i ++ ) { result_host_cpu_core [ i ] = result . host_cpu_core_response_array [ i ] . host_cpu_core [ 0 ] ; } return result_host_cpu_core ;
public class JavaZipFileSystem { /** * { @ inheritDoc } */ public boolean isFile ( final VirtualFile mountPoint , final VirtualFile target ) { } }
final ZipNode zipNode = rootNode . find ( mountPoint , target ) ; return zipNode != null && zipNode . entry != null ;
public class TransmissionDataIterator {
    /**
     * Returns a previously allocated instance of this class to the pool.
     * The instance is only repooled once no transmissions remain; its byte buffer is
     * released and all references are cleared so the pooled instance holds no resources.
     */
    protected void release() {
        if (tc.isEntryEnabled()) SibTr.entry(this, tc, "release");
        if (!transmissionsRemaining) {
            if (tc.isDebugEnabled()) SibTr.debug(this, tc, "no more transmissions remaining - repooling");
            // Ensure we release the byte buffers back into the pool
            if (buffer != null) {
                buffer.release();
            }
            // Clear references so the pooled instance does not pin these objects.
            connection = null;
            conversation = null;
            buffer = null;
            sendListener = null;
            pool.add(this);
        }
        if (tc.isEntryEnabled()) SibTr.exit(this, tc, "release");
    }
}
public class Waiter {
    /**
     * Creates a new ActivityMonitor and returns it.
     *
     * @return an ActivityMonitor
     */
    private ActivityMonitor getActivityMonitor() {
        // NOTE(review): the typed null local presumably serves to select the
        // IntentFilter-based addMonitor overload (a bare null would be ambiguous
        // with the String-based overload) and to monitor all activities — confirm
        // against the android.app.Instrumentation documentation.
        IntentFilter filter = null;
        // block == false: observe activity starts without blocking them.
        ActivityMonitor activityMonitor = instrumentation.addMonitor(filter, null, false);
        return activityMonitor;
    }
}
public class CmsResourceWrapperUtils { /** * Escapes the value of a property in OpenCms to be displayed * correctly in a property file . < p > * Mainly handles all escaping sequences that start with a backslash . < p > * @ see # unescapeString ( String ) * @ param value the value with the string to be escaped * @ return the escaped string */ private static String escapeString ( String value ) { } }
Map < String , String > substitutions = new HashMap < String , String > ( ) ; substitutions . put ( "\n" , "\\n" ) ; substitutions . put ( "\t" , "\\t" ) ; substitutions . put ( "\r" , "\\r" ) ; return CmsStringUtil . substitute ( value , substitutions ) ;
public class BELValidatorServiceImpl { /** * { @ inheritDoc } */ @ Override public BELParseResults validateBELScript ( File belScriptFile ) { } }
try { String belScriptText = FileUtils . readFileToString ( belScriptFile , UTF_8 ) ; return validateBELScript ( belScriptText ) ; } catch ( IOException e ) { throw new MissingEncodingException ( UTF_8 , e ) ; }
public class SwingUtil { /** * Restores anti - aliasing in the supplied graphics context to its original setting . * @ param rock the results of a previous call to { @ link # activateAntiAliasing } or null , in * which case this method will NOOP . This alleviates every caller having to conditionally avoid * calling restore if they chose not to activate earlier . */ public static void restoreAntiAliasing ( Graphics2D gfx , Object rock ) { } }
if ( rock != null ) { gfx . setRenderingHints ( ( RenderingHints ) rock ) ; }
public class PubSubOutputHandler {
    /**
     * sendSilenceMessage may be called from InternalOutputStream when a Nack is received.
     * Builds a ControlSilence message covering the tick range [startStamp, endStamp]
     * and sends it to the target messaging engine.
     */
    public void sendSilenceMessage(long startStamp, long endStamp, long completedPrefix, boolean requestedOnly, int priority, Reliability reliability, SIBUuid12 stream) throws SIResourceException {
        if (TraceComponent.isAnyTracingEnabled() && tc.isEntryEnabled())
            SibTr.entry(tc, "sendSilenceMessage",
                new Object[] { new Long(startStamp), new Long(endStamp), new Long(completedPrefix), new Integer(priority), reliability });
        ControlSilence sMsg;
        try {
            // Create new Silence message
            sMsg = _cmf.createNewControlSilence();
        } catch (Exception e) {
            // Message creation failed: FFDC, trace, then rethrow as a resource exception.
            FFDCFilter.processException(e, "com.ibm.ws.sib.processor.impl.PubSubOutputHandler.sendSilenceMessage", "1:787:1.164.1.5", this);
            SibTr.exception(tc, e);
            SibTr.error(tc, "INTERNAL_MESSAGING_ERROR_CWSIP0002",
                new Object[] { "com.ibm.ws.sib.processor.impl.PubSubOutputHandler", "1:794:1.164.1.5", e });
            if (TraceComponent.isAnyTracingEnabled() && tc.isEntryEnabled())
                SibTr.exit(tc, "sendSilenceMessage", e);
            throw new SIResourceException(nls.getFormattedMessage("INTERNAL_MESSAGING_ERROR_CWSIP0002",
                new Object[] { "com.ibm.ws.sib.processor.impl.PubSubOutputHandler", "1:805:1.164.1.5", e }, null), e);
        }
        // As we are using the Guaranteed Header - set all the attributes as
        // well as the ones we want.
        SIMPUtils.setGuaranteedDeliveryProperties(sMsg, _messageProcessor.getMessagingEngineUuid(), null, stream, null,
            _destinationHandler.getUuid(), ProtocolType.PUBSUBINPUT, GDConfig.PROTOCOL_VERSION);
        // Populate the silence range and delivery attributes.
        sMsg.setStartTick(startStamp);
        sMsg.setEndTick(endStamp);
        sMsg.setPriority(priority);
        sMsg.setReliability(reliability);
        sMsg.setCompletedPrefix(completedPrefix);
        sMsg.setRequestedOnly(requestedOnly);
        // If the destination is a Link add Link specific properties to message
        if (_isLink) {
            sMsg = (ControlSilence) addLinkProps(sMsg);
        }
        // Send message to destination using MPIO: send the message to the
        // MessageTransmitter, adding the single target cellule to the array.
        SIBUuid8[] fromTo = new SIBUuid8[1];
        fromTo[0] = _targetMEUuid;
        // Send at priority + 1 if this is a response to a Nack
        if (requestedOnly)
            _mpio.sendDownTree(fromTo, priority + 1, sMsg);
        else
            _mpio.sendDownTree(fromTo, priority, sMsg);
        if (TraceComponent.isAnyTracingEnabled() && tc.isEntryEnabled())
            SibTr.exit(tc, "sendSilenceMessage");
    }
}
public class CfgParser {
    /**
     * Compute the outside probabilities moving downward from the top of the tree.
     * Requires that the inside pass has already been run on {@code chart}; on
     * completion the chart is marked as having outside probabilities calculated.
     */
    private void downwardChartPass(CfgParseChart chart) {
        Preconditions.checkState(chart.getInsideCalculated());
        // Calculate root marginal, which is not included in the rest of the pass.
        // Also compute the partition function.
        Factor rootOutside = chart.getOutsideEntries(0, chart.chartSize() - 1);
        Factor rootInside = chart.getInsideEntries(0, chart.chartSize() - 1);
        Factor rootMarginal = rootOutside.product(rootInside);
        chart.setPartitionFunction(rootMarginal.marginalize(parentVar).getUnnormalizedProbability(Assignment.EMPTY));
        // Scratch buffer reused across spans while accumulating rule weights.
        double[] newValues = new double[binaryDistributionWeights.getValues().length];
        // Visit spans from widest to narrowest so each span's outside probability
        // is available before its sub-spans are processed.
        for (int spanSize = chart.chartSize() - 1; spanSize >= 1; spanSize--) {
            for (int spanStart = 0; spanStart + spanSize < chart.chartSize(); spanStart++) {
                int spanEnd = spanStart + spanSize;
                calculateOutside(spanStart, spanEnd, chart, newValues);
            }
        }
        updateTerminalRuleCounts(chart);
        // Outside probabilities / partition function are now calculated.
        chart.setOutsideCalculated();
    }
}
public class Adresse { /** * Liefert die Strasse in einer abgekuerzten Schreibweise . * @ return z . B . " Badstr . " */ public String getStrasseKurz ( ) { } }
if ( PATTERN_STRASSE . matcher ( strasse ) . matches ( ) ) { return strasse . substring ( 0 , StringUtils . lastIndexOfIgnoreCase ( strasse , "stra" ) + 3 ) + '.' ; } else { return strasse ; }
public class KeyValueHandler { /** * Encodes a { @ link GetRequest } into its lower level representation . * Depending on the flags set on the { @ link GetRequest } , the appropriate opcode gets chosen . Currently , a regular * get , as well as " get and touch " and " get and lock " are supported . Latter variants have server - side side - effects * but do not differ in response behavior . * @ param ctx the { @ link ChannelHandlerContext } to use for allocation and others . * @ param msg the incoming message . * @ return a ready { @ link BinaryMemcacheRequest } . */ private static BinaryMemcacheRequest handleGetRequest ( final ChannelHandlerContext ctx , final GetRequest msg ) { } }
byte opcode ; ByteBuf extras ; if ( msg . lock ( ) ) { opcode = OP_GET_AND_LOCK ; extras = ctx . alloc ( ) . buffer ( ) . writeInt ( msg . expiry ( ) ) ; } else if ( msg . touch ( ) ) { opcode = OP_GET_AND_TOUCH ; extras = ctx . alloc ( ) . buffer ( ) . writeInt ( msg . expiry ( ) ) ; } else { opcode = OP_GET ; extras = Unpooled . EMPTY_BUFFER ; } byte [ ] key = msg . keyBytes ( ) ; short keyLength = ( short ) key . length ; byte extrasLength = ( byte ) extras . readableBytes ( ) ; BinaryMemcacheRequest request = new DefaultBinaryMemcacheRequest ( key ) ; request . setOpcode ( opcode ) . setKeyLength ( keyLength ) . setExtras ( extras ) . setExtrasLength ( extrasLength ) . setTotalBodyLength ( keyLength + extrasLength ) ; return request ;
public class CacheAction {
    /**
     * Renders the cached content of a document as an HTML stream response.
     * Requires login when the system demands it; unknown doc IDs route to the error page.
     */
    @Execute
    public ActionResponse index(final CacheForm form) {
        // Validate the form; validation failures render the error page.
        validate(form, messages -> {}, () -> asHtml(virtualHost(path_Error_ErrorJsp)));
        if (isLoginRequired()) {
            return redirectToLogin();
        }
        Map<String, Object> doc = null;
        try {
            // Fetch the document by ID with only the fields needed for the cache view.
            doc = searchService.getDocumentByDocId(form.docId, queryHelper.getCacheResponseFields(), getUserBean()).orElse(null);
        } catch (final Exception e) {
            // Lookup failures are logged and treated as "document not found" below.
            logger.warn("Failed to request: " + form.docId, e);
        }
        if (doc == null) {
            saveError(messages -> messages.addErrorsDocidNotFound(GLOBAL, form.docId));
            return redirect(ErrorAction.class);
        }
        // Build the cached HTML (form.hq presumably carries highlight queries — TODO confirm).
        final String content = viewHelper.createCacheContent(doc, form.hq);
        if (content == null) {
            saveError(messages -> messages.addErrorsDocidNotFound(GLOBAL, form.docId));
            return redirect(ErrorAction.class);
        }
        final StreamResponse response = asStream(DocumentUtil.getValue(doc, fessConfig.getIndexFieldDocId(), String.class))
            .contentType("text/html; charset=UTF-8")
            .data(content.getBytes(Constants.CHARSET_UTF_8));
        response.headerContentDispositionInline(); // TODO will be fixed in lastaflute
        return response;
    }
}
public class PropertyResolver { /** * Resolve a property from System Properties ( aka $ { key } ) key : defval is * supported and if key not found on SysProps , defval will be returned * @ param s * @ return resolved string or null if not found in System Properties and no * defval */ private String resolveString ( String s ) { } }
int pos = s . indexOf ( ":" , 0 ) ; if ( pos == - 1 ) return System . getProperty ( s ) ; String key = s . substring ( 0 , pos ) ; String defval = s . substring ( pos + 1 ) ; String val = System . getProperty ( key ) ; if ( val != null ) return val ; else return defval ;
public class UserLayoutManagerFactory { /** * Obtain a regular user layout manager implementation ( which allows transient layout * alterations ) . The specific layout type depends on whether the user is a guest user . * @ return an < code > IUserLayoutManager < / code > value */ public IUserLayoutManager getUserLayoutManager ( IPerson person , IUserProfile profile ) throws PortalException { } }
final IUserLayoutManager userLayoutManager = ( IUserLayoutManager ) this . beanFactory . getBean ( USER_LAYOUT_MANAGER_PROTOTYPE_BEAN_NAME , person , profile ) ; if ( person . isGuest ( ) ) { return new ImmutableTransientUserLayoutManagerWrapper ( userLayoutManager ) ; } return new TransientUserLayoutManagerWrapper ( userLayoutManager ) ;
public class TemperatureConversion { /** * Convert a temperature value from another temperature scale into the Celsius temperature scale . * @ param from TemperatureScale * @ param temperature value from other scale * @ return converted temperature value in degrees centigrade */ public static double convertToCelsius ( TemperatureScale from , double temperature ) { } }
switch ( from ) { case FARENHEIT : return convertFarenheitToCelsius ( temperature ) ; case CELSIUS : return temperature ; case KELVIN : return convertKelvinToCelsius ( temperature ) ; case RANKINE : return convertRankineToCelsius ( temperature ) ; default : throw ( new RuntimeException ( "Invalid termpature conversion" ) ) ; }
public class FileSplitter { /** * This method writes a partial input file to a split file * @ param splitDir split file directory * @ param data data * @ return < code > File < / code > new split file */ private static File writePartToFile ( File splitDir , List < String > data ) { } }
BufferedWriter writer = null ; File splitFile ; try { splitFile = File . createTempFile ( "split-" , ".part" , splitDir ) ; writer = new BufferedWriter ( new OutputStreamWriter ( new FileOutputStream ( splitFile . getAbsoluteFile ( ) , false ) , DataUtilDefaults . charSet ) ) ; for ( String item : data ) { writer . write ( item + DataUtilDefaults . lineTerminator ) ; } writer . flush ( ) ; writer . close ( ) ; writer = null ; } catch ( UnsupportedEncodingException e ) { throw new DataUtilException ( e ) ; } catch ( FileNotFoundException e ) { throw new DataUtilException ( e ) ; } catch ( IOException e ) { throw new DataUtilException ( e ) ; } finally { if ( writer != null ) { try { writer . close ( ) ; } catch ( IOException e ) { // Intentionally we ' re doing nothing here } } } return splitFile ;
public class RestCallbackBuilder {
    /**
     * create rest callback implementation.
     *
     * @param pview view of the editor
     * @param pdata data given from server
     * @param psession session
     * @param pcallbackOnSuccess on success callback
     * @param <P> presenter type
     * @param <D> data type given to the server
     * @param <V> view or widget which implements EditorWithErrorHandling interface
     * @param <R> rest result type
     * @param <H> http messages
     * @return RestCallbackImpl
     */
    public static <P, D, V extends EditorWithErrorHandling<P, D>, R, H extends HttpMessages> RestCallbackImpl<P, D, V, R, H> build(final V pview, final D pdata, final Session psession, final AsyncCallbackOnSuccess<R> pcallbackOnSuccess) {
        // Thin factory: simply forwards all arguments to the implementation constructor.
        return new RestCallbackImpl<>(pview, pdata, psession, pcallbackOnSuccess);
    }
}
public class AcceptAllSocketFactory { /** * In the case of this factory the intent is to ensure that a truststore is not set , * as this does not make sense in the context of an accept - all policy */ @ Override public void initWithNiwsConfig ( IClientConfig clientConfig ) { } }
if ( clientConfig == null ) { return ; } if ( clientConfig . getOrDefault ( CommonClientConfigKey . TrustStore ) != null ) { throw new IllegalArgumentException ( "Client configured with an AcceptAllSocketFactory cannot utilize a truststore" ) ; }
public class GeometryRendererImpl { public void onGeometryIndexSelected ( GeometryIndexSelectedEvent event ) { } }
for ( GeometryIndex index : event . getIndices ( ) ) { update ( event . getGeometry ( ) , index , false ) ; }
public class CmsPropertyChange {
    /**
     * Performs the main property change value operation on the resource property.<p>
     *
     * @param recursive true, if the property value has to be changed recursively, otherwise false
     * @return true, if the property values are changed successfully, otherwise false
     * @throws CmsException if changing is not successful
     */
    private boolean performChangeOperation(boolean recursive) throws CmsException {
        // on recursive property changes display "please wait" screen
        if (recursive && !DIALOG_WAIT.equals(getParamAction())) {
            // return false, this will trigger the "please wait" screen
            return false;
        }
        // lock the selected resource
        checkLock(getParamResource());
        // change the property values
        List changedResources = null;
        if (CmsStringUtil.isNotEmptyOrWhitespaceOnly(getParamOldValue())) {
            // an old value was given: replace only properties matching that value
            changedResources = getCms().changeResourcesInFolderWithProperty(getParamResource(), getParamPropertyName(), getParamOldValue(), getParamNewValue(), recursive);
        } else {
            // no old value: set the property on all resources in the folder
            changedResources = setPropertyInFolder(getParamResource(), getParamPropertyName(), getParamNewValue(), recursive);
        }
        // remember which resources were touched so the dialog can report them
        setChangedResources(changedResources);
        return true;
    }
}
public class HeapCompactOrderedSketch {
    /**
     * Converts the given UpdateSketch to this compact form.
     *
     * @param sketch the given UpdateSketch
     * @return a CompactSketch
     */
    static CompactSketch compact(final UpdateSketch sketch) {
        final int curCount = sketch.getRetainedEntries(true);
        long thetaLong = sketch.getThetaLong();
        boolean empty = sketch.isEmpty();
        // Adjust theta and the empty flag for the compact representation.
        thetaLong = thetaOnCompact(empty, curCount, thetaLong);
        empty = emptyOnCompact(curCount, thetaLong);
        final short seedHash = sketch.getSeedHash();
        final long[] cache = sketch.getCache();
        // This class always produces the ordered compact layout.
        final boolean ordered = true;
        final long[] cacheOut = CompactSketch.compactCache(cache, curCount, thetaLong, ordered);
        // Exactly one retained entry at full theta compacts to the cheaper single-item form.
        if ((curCount == 1) && (thetaLong == Long.MAX_VALUE)) {
            return new SingleItemSketch(cacheOut[0], seedHash);
        }
        return new HeapCompactOrderedSketch(cacheOut, empty, seedHash, curCount, thetaLong);
    }
}
public class GoroService {
    /**
     * Create an intent that contains a task that should be scheduled
     * on a defined queue. The Service will run in the foreground and display notification.
     * Intent can be used as an argument for
     * {@link android.content.Context#startService(android.content.Intent)}
     * or {@link android.content.Context#startForegroundService(Intent)}.
     *
     * @param context context instance
     * @param task task instance
     * @param <T> task type
     * @param notificationId id of notification for foreground Service, must not be 0
     * @param notification notification for foreground Service,
     *        should be not null to start service in the foreground
     */
    public static <T extends Callable<?> & Parcelable> Intent foregroundTaskIntent(final Context context, final T task, final int notificationId, final Notification notification) {
        // Delegate to the queue-aware overload, scheduling on the default queue.
        return foregroundTaskIntent(context, Goro.DEFAULT_QUEUE, task, notificationId, notification);
    }
}
public class GitlabAPI { /** * Get a list of projects by pagination accessible by the authenticated user . * @ param pagination * @ return * @ throws IOException on gitlab api call error */ public List < GitlabProject > getProjectsWithPagination ( Pagination pagination ) throws IOException { } }
StringBuilder tailUrl = new StringBuilder ( GitlabProject . URL ) ; if ( pagination != null ) { Query query = pagination . asQuery ( ) ; tailUrl . append ( query . toString ( ) ) ; } return Arrays . asList ( retrieve ( ) . method ( GET ) . to ( tailUrl . toString ( ) , GitlabProject [ ] . class ) ) ;
public class DateUtil {
    /**
     * Adds a number of hours to a calendar, returning a new object.
     * The original {@code Calendar} is unchanged.
     *
     * @param calendar the calendar, not null
     * @param amount the amount of hours to add, may be negative
     * @return a new {@code Calendar} with the amount added
     * @throws IllegalArgumentException if the calendar is null
     */
    public static <T extends Calendar> T addHours(final T calendar, final int amount) {
        // Delegate to the generic roll helper using the HOUR unit.
        return roll(calendar, amount, CalendarUnit.HOUR);
    }
}
public class SnsMessageUnmarshaller {
    /**
     * Unmarshall into a {@link SnsSubscriptionConfirmation} object.
     *
     * @param message JSON message.
     * @return Builder with sub properties filled in. Base properties are added by
     *         {@link #unmarshallBase(SnsMessage.Builder, SnsJsonNode)}.
     */
    private SnsMessage.Builder<? extends SnsMessage.Builder> unmarshallSubscriptionConfirmation(SnsJsonNode message) {
        // Populate only the subscription-confirmation-specific fields;
        // the shared base fields are filled in by unmarshallBase.
        return SnsSubscriptionConfirmation.builder(client)
            .withSubscribeUrl(message.getString(SUBSCRIBE_URL))
            .withToken(message.getString(TOKEN))
            .withMessage(message.getString(MESSAGE));
    }
}
public class PgDatabaseMetaData {
    /**
     * {@inheritDoc}
     * <p>From PostgreSQL 9.0+ return the keywords from pg_catalog.pg_get_keywords()</p>
     * The result is computed once and cached in the {@code keywords} field.
     *
     * @return a comma separated list of keywords we use
     * @throws SQLException if a database access error occurs
     */
    @Override
    public String getSQLKeywords() throws SQLException {
        connection.checkClosed();
        if (keywords == null) {
            if (connection.haveMinimumServerVersion(ServerVersion.v9_0)) {
                // Exclude SQL:2003 keywords (https://github.com/ronsavage/SQL/blob/master/sql-2003-2.bnf)
                // from the returned list, ugly but required by jdbc spec.
                String sql = "select string_agg(word, ',') from pg_catalog.pg_get_keywords() "
                    + "where word <> ALL ('{a,abs,absolute,action,ada,add,admin,after,all,allocate,alter,"
                    + "always,and,any,are,array,as,asc,asensitive,assertion,assignment,asymmetric,at,atomic,"
                    + "attribute,attributes,authorization,avg,before,begin,bernoulli,between,bigint,binary,"
                    + "blob,boolean,both,breadth,by,c,call,called,cardinality,cascade,cascaded,case,cast,"
                    + "catalog,catalog_name,ceil,ceiling,chain,char,char_length,character,character_length,"
                    + "character_set_catalog,character_set_name,character_set_schema,characteristics,"
                    + "characters,check,checked,class_origin,clob,close,coalesce,cobol,code_units,collate,"
                    + "collation,collation_catalog,collation_name,collation_schema,collect,column,"
                    + "column_name,command_function,command_function_code,commit,committed,condition,"
                    + "condition_number,connect,connection_name,constraint,constraint_catalog,constraint_name,"
                    + "constraint_schema,constraints,constructors,contains,continue,convert,corr,"
                    + "corresponding,count,covar_pop,covar_samp,create,cross,cube,cume_dist,current,"
                    + "current_collation,current_date,current_default_transform_group,current_path,"
                    + "current_role,current_time,current_timestamp,current_transform_group_for_type,current_user,"
                    + "cursor,cursor_name,cycle,data,date,datetime_interval_code,datetime_interval_precision,"
                    + "day,deallocate,dec,decimal,declare,default,defaults,deferrable,deferred,defined,definer,"
                    + "degree,delete,dense_rank,depth,deref,derived,desc,describe,descriptor,deterministic,"
                    + "diagnostics,disconnect,dispatch,distinct,domain,double,drop,dynamic,dynamic_function,"
                    + "dynamic_function_code,each,element,else,end,end-exec,equals,escape,every,except,"
                    + "exception,exclude,excluding,exec,execute,exists,exp,external,extract,false,fetch,filter,"
                    + "final,first,float,floor,following,for,foreign,fortran,found,free,from,full,function,"
                    + "fusion,g,general,get,global,go,goto,grant,granted,group,grouping,having,hierarchy,hold,"
                    + "hour,identity,immediate,implementation,in,including,increment,indicator,initially,"
                    + "inner,inout,input,insensitive,insert,instance,instantiable,int,integer,intersect,"
                    + "intersection,interval,into,invoker,is,isolation,join,k,key,key_member,key_type,language,"
                    + "large,last,lateral,leading,left,length,level,like,ln,local,localtime,localtimestamp,"
                    + "locator,lower,m,map,match,matched,max,maxvalue,member,merge,message_length,"
                    + "message_octet_length,message_text,method,min,minute,minvalue,mod,modifies,module,month,"
                    + "more,multiset,mumps,name,names,national,natural,nchar,nclob,nesting,new,next,no,none,"
                    + "normalize,normalized,not,\"null\",nullable,nullif,nulls,number,numeric,object,"
                    + "octet_length,octets,of,old,on,only,open,option,options,or,order,ordering,ordinality,"
                    + "others,out,outer,output,over,overlaps,overlay,overriding,pad,parameter,parameter_mode,"
                    + "parameter_name,parameter_ordinal_position,parameter_specific_catalog,"
                    + "parameter_specific_name,parameter_specific_schema,partial,partition,pascal,path,"
                    + "percent_rank,percentile_cont,percentile_disc,placing,pli,position,power,preceding,"
                    + "precision,prepare,preserve,primary,prior,privileges,procedure,public,range,rank,read,"
                    + "reads,real,recursive,ref,references,referencing,regr_avgx,regr_avgy,regr_count,"
                    + "regr_intercept,regr_r2,regr_slope,regr_sxx,regr_sxy,regr_syy,relative,release,"
                    + "repeatable,restart,result,return,returned_cardinality,returned_length,"
                    + "returned_octet_length,returned_sqlstate,returns,revoke,right,role,rollback,rollup,"
                    + "routine,routine_catalog,routine_name,routine_schema,row,row_count,row_number,rows,"
                    + "savepoint,scale,schema,schema_name,scope_catalog,scope_name,scope_schema,scroll,"
                    + "search,second,section,security,select,self,sensitive,sequence,serializable,server_name,"
                    + "session,session_user,set,sets,similar,simple,size,smallint,some,source,space,specific,"
                    + "specific_name,specifictype,sql,sqlexception,sqlstate,sqlwarning,sqrt,start,state,"
                    + "statement,static,stddev_pop,stddev_samp,structure,style,subclass_origin,submultiset,"
                    + "substring,sum,symmetric,system,system_user,table,table_name,tablesample,temporary,then,"
                    + "ties,time,timestamp,timezone_hour,timezone_minute,to,top_level_count,trailing,"
                    + "transaction,transaction_active,transactions_committed,transactions_rolled_back,"
                    + "transform,transforms,translate,translation,treat,trigger,trigger_catalog,trigger_name,"
                    + "trigger_schema,trim,true,type,uescape,unbounded,uncommitted,under,union,unique,unknown,"
                    + "unnamed,unnest,update,upper,usage,user,user_defined_type_catalog,user_defined_type_code,"
                    + "user_defined_type_name,user_defined_type_schema,using,value,values,var_pop,var_samp,"
                    + "varchar,varying,view,when,whenever,where,width_bucket,window,with,within,without,work,"
                    + "write,year,zone}'::text[])";
                Statement stmt = null;
                ResultSet rs = null;
                try {
                    stmt = connection.createStatement();
                    rs = stmt.executeQuery(sql);
                    if (!rs.next()) {
                        throw new PSQLException(GT.tr("Unable to find keywords in the system catalogs."), PSQLState.UNEXPECTED_ERROR);
                    }
                    keywords = rs.getString(1);
                } finally {
                    // Close quietly; failures while closing must not mask the result.
                    JdbcBlackHole.close(rs);
                    JdbcBlackHole.close(stmt);
                }
            } else {
                // Static list from PG8.2 src/backend/parser/keywords.c with SQL:2003 excluded.
                keywords = "abort,access,aggregate,also,analyse,analyze,backward,bit,cache,checkpoint,class,"
                    + "cluster,comment,concurrently,connection,conversion,copy,csv,database,delimiter,"
                    + "delimiters,disable,do,enable,encoding,encrypted,exclusive,explain,force,forward,freeze,"
                    + "greatest,handler,header,if,ilike,immutable,implicit,index,indexes,inherit,inherits,"
                    + "instead,isnull,least,limit,listen,load,location,lock,mode,move,nothing,notify,notnull,"
                    + "nowait,off,offset,oids,operator,owned,owner,password,prepared,procedural,quote,reassign,"
                    + "recheck,reindex,rename,replace,reset,restrict,returning,rule,setof,share,show,stable,"
                    + "statistics,stdin,stdout,storage,strict,sysid,tablespace,temp,template,truncate,trusted,"
                    + "unencrypted,unlisten,until,vacuum,valid,validator,verbose,volatile";
            }
        }
        return keywords;
    }
}
public class AnimaQuery {
    /**
     * Paging query results by sql.
     *
     * @param sql sql statement
     * @param pageRow page param
     * @return Page
     */
    public Page<T> page(String sql, PageRow pageRow) {
        // Delegate to the overload, passing the parameter values accumulated on this query.
        return this.page(sql, paramValues, pageRow);
    }
}
public class TileBasedLayerClient { /** * Create a new OSM layer with the given ID and tile configuration . The layer will be configured * with the default OSM tile services so you don ' t have to specify these URLs yourself . * @ param id The unique ID of the layer . * @ param conf The tile configuration . * @ return A new OSM layer . * @ deprecated use { @ link # createDefaultOsmLayer ( String , int ) } */ @ Deprecated public OsmLayer createDefaultOsmLayer ( String id , TileConfiguration conf ) { } }
// Build the layer, then attach the default OSM tile-service URLs so callers
// do not have to configure them manually.
final OsmLayer osmLayer = new OsmLayer(id, conf);
osmLayer.addUrls(Arrays.asList(DEFAULT_OSM_URLS));
return osmLayer;
public class CmsImportVersion7 { /** * Sets the organizational unit flags . < p > * @ param orgUnitFlags the flags to set */ public void setOrgUnitFlags ( String orgUnitFlags ) { } }
try {
    m_orgUnitFlags = Integer.parseInt(orgUnitFlags);
} catch (NumberFormatException e) {
    // Record the parse failure for later reporting instead of propagating it.
    // Previously this caught Throwable, which would also have swallowed JVM
    // Errors such as OutOfMemoryError; Integer.parseInt only throws
    // NumberFormatException (including for a null argument), so the narrower
    // catch preserves behavior for all realistic inputs.
    setThrowable(e);
}
public class Choice { /** * Maps the choices with the specified function . */ public < R > Choice < R > transform ( final Function < ? super T , R > function ) { } }
checkNotNull(function);
// Capture the receiver so the anonymous subclass can reach its iterator.
final Choice<T> source = this;
return new Choice<R>() {
    @Override
    protected Iterator<R> iterator() {
        // Lazily apply the mapping function to each element of the underlying choice.
        return Iterators.transform(source.iterator(), function);
    }
};
public class AccordionPanel { /** * Remove the given component from this accordion * @ param component The component to remove */ public void removeFromAccordion ( JComponent component ) { } }
// Look up the wrapper panel registered for this component; a miss means the
// component was never added (or was already removed), so there is nothing to do.
final CollapsiblePanel wrapper = collapsiblePanels.get(component);
if (wrapper == null) {
    return;
}
contentPanel.remove(wrapper);
collapsiblePanels.remove(component);
revalidate();
public class ListRunsResult { /** * Information about the runs . * < b > NOTE : < / b > This method appends the values to the existing list ( if any ) . Use * { @ link # setRuns ( java . util . Collection ) } or { @ link # withRuns ( java . util . Collection ) } if you want to override the * existing values . * @ param runs * Information about the runs . * @ return Returns a reference to this object so that method calls can be chained together . */ public ListRunsResult withRuns ( Run ... runs ) { } }
// Lazily create the backing list, sized for the incoming elements, then append
// all varargs values to it (values are appended, never replaced).
if (this.runs == null) {
    setRuns(new java.util.ArrayList<Run>(runs.length));
}
java.util.Collections.addAll(this.runs, runs);
return this;
public class RaftSessionListener { /** * Closes the session event listener . * @ return A completable future to be completed once the listener is closed . */ public CompletableFuture < Void > close ( ) { } }
protocol . unregisterPublishListener ( state . getSessionId ( ) ) ; return CompletableFuture . completedFuture ( null ) ;
public class DynamicRepositoryDecoratorRegistryImpl { /** * Decorates a { @ link Repository } with one or more { @ link DynamicDecorator } s . , based on the { @ link * DecoratorConfiguration } entity . */ @ SuppressWarnings ( "unchecked" ) private Repository < Entity > decorateRepository ( Repository < Entity > repository , DecoratorConfiguration configuration ) { } }
// Parameters for each decorator, keyed by decorator id.
final Map<String, Map<String, Object>> parametersByDecoratorId = getParameterMap(configuration);
final List<DecoratorParameters> allDecoratorParameters =
        configuration.getDecoratorParameters().collect(toList());
for (DecoratorParameters params : allDecoratorParameters) {
    DynamicRepositoryDecoratorFactory factory = factories.get(params.getDecorator().getId());
    if (factory == null) {
        // Unknown decorator id: leave the repository undecorated for this entry.
        continue;
    }
    repository = factory.createDecoratedRepository(
            repository, parametersByDecoratorId.get(factory.getId()));
}
return repository;
public class JsHdrsImpl { /** * Get the identity of the destination definition ( not localisation ) * Javadoc description supplied by CommonMessageHeaders interface . */ public final SIBUuid12 getGuaranteedTargetDestinationDefinitionUUID ( ) { } }
// The UUID is stored as a raw byte array in the header; absent field means no UUID.
final Object raw = getHdr2().getField(JsHdr2Access.GUARANTEED_SET_TARGETDESTDEFUUID);
if (raw == null) {
    return null;
}
return new SIBUuid12((byte[]) raw);
public class UpdateClusterUtils { /** * Creates a replica of the node with the new partitions list * @ param node The node whose replica we are creating * @ param partitionsList The new partitions list * @ return Replica of node with new partitions list */ public static Node updateNode ( Node node , List < Integer > partitionsList ) { } }
return new Node ( node . getId ( ) , node . getHost ( ) , node . getHttpPort ( ) , node . getSocketPort ( ) , node . getAdminPort ( ) , node . getZoneId ( ) , partitionsList ) ;
public class DoubleArrayTrie { /** * 从磁盘加载 , 需要额外提供值 * @ param path * @ param value * @ return */ public boolean load ( String path , V [ ] value ) { } }
if ( ! ( IOAdapter == null ? loadBaseAndCheckByFileChannel ( path ) : load ( ByteArrayStream . createByteArrayStream ( path ) , value ) ) ) return false ; v = value ; return true ;
public class ListContext {
    /**
     * Create a new {@link ArrayList} instance adding all values of the given
     * lists together. The second list will essentially be added after the
     * first list. Note that any duplicates between lists will remain as
     * duplicates.
     *
     * @param list1 The first list to merge
     * @param list2 The second list to merge
     * @return The newly created list containing the merged lists
     */
    public List<?> mergeLists(List<?> list1, List<?> list2) {
        // Presize to the combined size so no intermediate growth is needed.
        // Using List<Object> internally avoids the raw types (and the
        // @SuppressWarnings({"unchecked", "rawtypes"})) the previous
        // implementation required; the method signature is unchanged.
        List<Object> merged = new ArrayList<>(list1.size() + list2.size());
        merged.addAll(list1);
        merged.addAll(list2);
        return merged;
    }
}
public class RowProcessingPublisher { /** * Closes consumers of this { @ link RowProcessingPublisher } . Usually this * will be done automatically when * { @ link # runRowProcessing ( Queue , TaskListener ) } is invoked . */ public void closeConsumers ( ) { } }
final TaskRunner runner = _publishers.getTaskRunner();
// Schedule a close task for every configurable consumer on the shared runner.
for (RowProcessingConsumer consumer : getConfigurableConsumers()) {
    TaskRunnable closeTask = createCloseTask(consumer, null);
    runner.run(closeTask);
}
public class Hierarchy { /** * Look up the field referenced by given FieldInstruction , returning it as * an { @ link XField XField } object . * @ param fins * the FieldInstruction * @ param cpg * the ConstantPoolGen used by the class containing the * instruction * @ return an XField object representing the field , or null if no such field * could be found */ public static @ CheckForNull XField findXField ( FieldInstruction fins , @ Nonnull ConstantPoolGen cpg ) { } }
String className = fins.getClassName(cpg);
String fieldName = fins.getFieldName(cpg);
String fieldSig = fins.getSignature(cpg);
// GETSTATIC/PUTSTATIC access static fields; everything else is an instance access.
boolean isStatic = (fins.getOpcode() == Const.GETSTATIC || fins.getOpcode() == Const.PUTSTATIC);
XField xfield = findXField(className, fieldName, fieldSig, isStatic);
// Only report the field when resolution succeeded and its static-ness matches
// the instruction. The previous code re-derived the same opcode comparison a
// second time instead of reusing the isStatic local computed above.
if (xfield != null && xfield.isResolved() && xfield.isStatic() == isStatic) {
    return xfield;
}
return null;
public class I18nModuleBuilder { /** * Adds the source for the locale specific i18n resource if it exists . * @ param list * The list of source files to add the i18n resource to * @ param bundleRoot * The module path for the bundle root ( e . g . ' foo / nls ' ) * @ param bundleRootRes * The bundle root resource that was requested * @ param localePath * The path relative to the bundle root for the locale specific * resource ( e . g . ' en - us / bar ' ) * @ return True if the source file for for the locale specified resource was * added * @ throws IOException */ private boolean tryAddModule ( IAggregator aggregator , List < String > list , String bundleRoot , IResource bundleRootRes , String locale , String resource , Collection < String > availableLocales ) throws IOException { } }
// If the bundle advertises its available locales and this one is not listed,
// there is nothing to add.
if (availableLocales != null && !availableLocales.contains(locale)) {
    return false;
}
boolean result = false;
URI uri = bundleRootRes.getURI();
// Candidate resource, e.g. <bundleRoot>/en-us/bar.js
URI testUri = uri.resolve(locale + "/" + resource + ".js"); //$NON-NLS-1$ //$NON-NLS-2$
IResource testResource = aggregator.newResource(testUri);
// When a locale list was supplied we trust it and skip the existence probe;
// otherwise fall back to checking whether the resource is actually present.
if (availableLocales != null || testResource.exists()) {
    String mid = bundleRoot + "/" + locale + "/" + resource; //$NON-NLS-1$ //$NON-NLS-2$
    list.add(mid);
    result = true;
}
return result;
public class Slf4jToFileBenchmark { /** * structured logging without parametrization with logstash */ @ Warmup ( iterations = 5 ) @ Measurement ( iterations = 5 ) @ Benchmark public void logstashStructuredLogging1Calls ( ) { } }
loggerLogstash . info ( "Event with double and boolean" , keyValue ( "varDouble" , 1.2 ) , keyValue ( "varBoolean" , false ) ) ;
public class SamzaEntranceProcessingItem { /** * Implement Samza Task */ @ Override public void init ( Config config , TaskContext context ) throws Exception { } }
String yarnConfHome = config.get(SamzaConfigFactory.YARN_CONF_HOME_KEY);
// Only set the Hadoop config home when the property is present; otherwise we
// assume local mode and leave the default alone. The original brace-less if
// bodies, separated from their conditions by comments, were error-prone —
// they are braced here without any behavior change.
if (yarnConfHome != null && yarnConfHome.length() > 0) {
    SystemsUtils.setHadoopConfigHome(yarnConfHome);
}
String filename = config.get(SamzaConfigFactory.FILE_KEY);
String filesystem = config.get(SamzaConfigFactory.FILESYSTEM_KEY);
this.setName(config.get(SamzaConfigFactory.JOB_NAME_KEY));
// Restore the processing-item state that was serialized out when the job was built.
SerializationProxy wrapper = (SerializationProxy) SystemsUtils
        .deserializeObjectFromFileAndKey(filesystem, filename, this.getName());
this.setOutputStream(wrapper.outputStream);
SamzaStream output = (SamzaStream) this.getOutputStream();
// The output stream is optional; initialize it only when one exists.
if (output != null) {
    output.onCreate();
}
public class ZKClient { /** * When the listener is set , it will receive the { @ link LifecycleListener # onConnected ( ) } callback * right away if ZooKeeper is already connected . This guarantees that you will never miss the * connected event irrelevant of whether you set the listener before or after calling * { @ link # start ( ) } . */ @ Override public void registerListener ( LifecycleListener listener ) { } }
if (listener == null)
    throw new NullPointerException("listener is null");
synchronized (_lock) {
    // Copy-on-write: only when the listener is genuinely new, build a fresh
    // immutable set so concurrent readers never see a partial update.
    if (_listeners == null || !_listeners.contains(listener)) {
        Set<LifecycleListener> listeners = new HashSet<LifecycleListener>();
        if (_listeners != null)
            listeners.addAll(_listeners);
        listeners.add(listener);
        _listeners = Collections.unmodifiableSet(listeners);
        // Lazily start the daemon dispatcher thread on first registration.
        if (_stateChangeDispatcher == null) {
            _stateChangeDispatcher = new StateChangeDispatcher();
            _stateChangeDispatcher.setDaemon(true);
            _stateChangeDispatcher.start();
        }
        if (_state == State.CONNECTED) {
            // since the listener is new and the client is already connected, we need to send
            // the connected event to this listener!
            _stateChangeDispatcher.addEvent(Arrays.asList(listener), null, State.CONNECTED);
        }
    }
}
public class ConcatVectorTable { /** * Deep comparison for equality of value , plus tolerance , for every concatvector in the table , plus dimensional * arrangement . This is mostly useful for testing . * @ param other the vector table to compare against * @ param tolerance the tolerance to use in value comparisons * @ return whether the two tables are equivalent by value */ public boolean valueEquals ( ConcatVectorTable other , double tolerance ) { } }
// Tables must agree on their dimensional arrangement before any per-assignment
// comparison makes sense.
if (!Arrays.equals(getDimensions(), other.getDimensions())) {
    return false;
}
// Every assignment's vector must match within the given tolerance.
for (int[] assignment : this) {
    boolean matches = getAssignmentValue(assignment).get()
            .valueEquals(other.getAssignmentValue(assignment).get(), tolerance);
    if (!matches) {
        return false;
    }
}
return true;
public class AnalyticFormulas { /** * Calculates the Black - Scholes option value of a call , i . e . , the payoff max ( S ( T ) - K , 0 ) , where S follows a log - normal process with constant log - volatility . * @ param initialStockValue The spot value of the underlying . * @ param riskFreeRate The risk free rate r ( df = exp ( - r T ) ) . * @ param volatility The Black - Scholes volatility . * @ param optionMaturity The option maturity T . * @ param optionStrike The option strike . If the option strike is & le ; 0.0 the method returns the value of the forward contract paying S ( T ) - K in T . * @ return Returns the value of a European call option under the Black - Scholes model . */ public static double blackScholesOptionValue ( double initialStockValue , double riskFreeRate , double volatility , double optionMaturity , double optionStrike ) { } }
return blackScholesGeneralizedOptionValue ( initialStockValue * Math . exp ( riskFreeRate * optionMaturity ) , // forward volatility , optionMaturity , optionStrike , Math . exp ( - riskFreeRate * optionMaturity ) // payoff unit ) ;
public class PartialMerkleTree { /** * Based on CPartialMerkleTree : : TraverseAndBuild in Bitcoin Core . */ private static void traverseAndBuild ( int height , int pos , List < Sha256Hash > allLeafHashes , byte [ ] includeBits , List < Boolean > matchedChildBits , List < Sha256Hash > resultHashes ) { } }
// Is this node a parent of at least one matched hash?
boolean parentOfMatch = false;
// This node covers leaf positions [pos << height, (pos + 1) << height).
for (int p = pos << height; p < (pos + 1) << height && p < allLeafHashes.size(); p++) {
    if (Utils.checkBitLE(includeBits, p)) {
        parentOfMatch = true;
        break;
    }
}
// Store as a flag bit.
matchedChildBits.add(parentOfMatch);
if (height == 0 || !parentOfMatch) {
    // If at height 0, or nothing interesting below, store hash and stop.
    resultHashes.add(calcHash(height, pos, allLeafHashes));
} else {
    // Otherwise descend into the subtrees.
    int h = height - 1;
    int p = pos * 2;
    traverseAndBuild(h, p, allLeafHashes, includeBits, matchedChildBits, resultHashes);
    // The right child exists only when it does not fall off the end of the row.
    if (p + 1 < getTreeWidth(allLeafHashes.size(), h))
        traverseAndBuild(h, p + 1, allLeafHashes, includeBits, matchedChildBits, resultHashes);
}
public class BamManager { /** * These methods aim to provide a very simple , safe and quick way of accessing to a small fragment of the BAM / CRAM file . * This must not be used in production for reading big data files . It returns a maximum of 50,000 SAM records , * you can use iterator methods for reading more reads . */ public List < SAMRecord > query ( AlignmentFilters < SAMRecord > filters ) throws IOException { } }
return query ( null , filters , null , SAMRecord . class ) ;
public class ObjectFactory { /** * Create an instance of { @ link JAXBElement } { @ code < } { @ link Boolean } { @ code > } */ @ XmlElementDecl ( namespace = "http://docs.oasis-open.org/ns/cmis/messaging/200908/" , name = "includePathSegment" , scope = GetDescendants . class ) public JAXBElement < Boolean > createGetDescendantsIncludePathSegment ( Boolean value ) { } }
return new JAXBElement < Boolean > ( _GetDescendantsIncludePathSegment_QNAME , Boolean . class , GetDescendants . class , value ) ;
public class XMLOutputOperator { /** * Formats the complete tag tree to an output stream */ public void dumpTo ( AreaTree tree , PrintWriter out ) { } }
if ( produceHeader ) out . println ( "<?xml version=\"1.0\"?>" ) ; out . println ( "<areaTree base=\"" + HTMLEntities ( tree . getRoot ( ) . getPage ( ) . getSourceURL ( ) . toString ( ) ) + "\">" ) ; recursiveDump ( tree . getRoot ( ) , 1 , out ) ; out . println ( "</areaTree>" ) ;
public class MathUtility {
    /**
     * Normalizes a map of log-scores in place using the log-sum-exp trick.
     * Each value v is replaced by exp(v - max) / sum_i exp(v_i - max), which is
     * numerically stable because subtracting the maximum prevents Math.exp from
     * overflowing. If the sum of exponentiated values is zero, the map is left
     * holding the raw (shifted) exp terms rather than dividing by zero.
     *
     * @param predictionScores map from label to log-score; mutated in place
     */
    public static void normalizeExp(Map<String, Double> predictionScores) {
        // Find the maximum log-score to shift by.
        double max = Double.NEGATIVE_INFINITY;
        for (Double score : predictionScores.values()) {
            max = Math.max(max, score);
        }
        // Exponentiate the shifted scores and accumulate their sum.
        double sum = 0.0;
        for (Map.Entry<String, Double> entry : predictionScores.entrySet()) {
            double shifted = Math.exp(entry.getValue() - max);
            entry.setValue(shifted);
            sum += shifted;
        }
        // Divide through by the sum, unless it is zero.
        if (sum != 0.0) {
            for (Map.Entry<String, Double> entry : predictionScores.entrySet()) {
                entry.setValue(entry.getValue() / sum);
            }
        }
    }
}
public class PatternsImpl { /** * Updates a pattern . * @ param appId The application ID . * @ param versionId The version ID . * @ param patternId The pattern ID . * @ param pattern An object representing a pattern . * @ param serviceCallback the async ServiceCallback to handle successful and failed responses . * @ throws IllegalArgumentException thrown if parameters fail the validation * @ return the { @ link ServiceFuture } object */ public ServiceFuture < PatternRuleInfo > updatePatternAsync ( UUID appId , String versionId , UUID patternId , PatternRuleUpdateObject pattern , final ServiceCallback < PatternRuleInfo > serviceCallback ) { } }
return ServiceFuture . fromResponse ( updatePatternWithServiceResponseAsync ( appId , versionId , patternId , pattern ) , serviceCallback ) ;
public class CacheOnDisk { /** * Call this method when the alarm is triggered . It is being checked to see whether a disk cleanup is scheduled to * run . */ public void alarm ( final Object alarmContext ) { } }
final String methodName = "alarm()" ; synchronized ( this ) { if ( ! stopping && ! this . htod . invalidationBuffer . isDiskClearInProgress ( ) ) { this . htod . invalidationBuffer . invokeBackgroundInvalidation ( HTODInvalidationBuffer . SCAN ) ; } else if ( stopping ) { traceDebug ( methodName , "cacheName=" + this . cacheName + " abort disk cleanup because of server is stopping." ) ; } else { if ( cleanupFrequency == 0 ) { sleepTime = calculateSleepTime ( ) ; } traceDebug ( methodName , "cacheName=" + this . cacheName + " disk clear is in progress - skip disk scan and set alarm sleepTime=" + sleepTime ) ; Scheduler . createNonDeferrable ( sleepTime , alarmContext , new Runnable ( ) { @ Override public void run ( ) { alarm ( alarmContext ) ; } } ) ; } }
public class SqlLineOpts { /** * The save directory if HOME / . sqlline / on UNIX , and HOME / sqlline / on * Windows . * @ return save directory */ public static File saveDir ( ) { } }
// Highest priority: an explicitly configured rc-file location.
String rcFile = System.getProperty("sqlline.rcfile");
if (rcFile != null && rcFile.length() > 0) {
    return new File(rcFile);
}
// Next: a configured base directory, created on demand.
String baseDir = System.getProperty(SqlLine.SQLLINE_BASE_DIR);
if (baseDir != null && baseDir.length() > 0) {
    File configured = new File(baseDir).getAbsoluteFile();
    configured.mkdirs();
    return configured;
}
// Default: HOME/.sqlline on UNIX-like systems, HOME/sqlline on Windows.
boolean windows = System.getProperty("os.name").toLowerCase(Locale.ROOT).contains("windows");
File defaultDir = new File(System.getProperty("user.home"),
        (windows ? "" : ".") + "sqlline").getAbsoluteFile();
try {
    defaultDir.mkdirs();
} catch (Exception ignored) {
    // best effort: fall through even if the directory could not be created
}
return defaultDir;
public class SpiderAPI { /** * Starts a spider scan at the given { @ code url } and , optionally , with the perspective of the given { @ code user } . * @ param url the url to start the spider scan * @ param user the user to scan as , or null if the scan is done without the perspective of any user * @ param maxChildren Max number of children to scan * @ param recurse Whether or not to scan recursively * @ param context the context that will be used during spider process , might be { @ code null } * @ param subtreeOnly if the scan should be done only under a site ' s subtree * @ return the ID of the newly started scan * @ throws ApiException if the { @ code url } is not valid */ private int scanURL ( String url , User user , int maxChildren , boolean recurse , Context context , boolean subtreeOnly ) throws ApiException { } }
log . debug ( "API Spider scanning url: " + url ) ; boolean useUrl = true ; if ( url == null || url . isEmpty ( ) ) { if ( context == null || ! context . hasNodesInContextFromSiteTree ( ) ) { throw new ApiException ( Type . MISSING_PARAMETER , PARAM_URL ) ; } useUrl = false ; } else if ( context != null && ! context . isInContext ( url ) ) { throw new ApiException ( Type . URL_NOT_IN_CONTEXT , PARAM_URL ) ; } StructuralNode node = null ; URI startURI = null ; if ( useUrl ) { try { // Try to build uri startURI = new URI ( url , true ) ; } catch ( URIException e ) { throw new ApiException ( ApiException . Type . ILLEGAL_PARAMETER , PARAM_URL ) ; } String scheme = startURI . getScheme ( ) ; if ( scheme == null || ( ! scheme . equalsIgnoreCase ( "http" ) && ! scheme . equalsIgnoreCase ( "https" ) ) ) { throw new ApiException ( ApiException . Type . ILLEGAL_PARAMETER , PARAM_URL ) ; } node = getStartNode ( startURI , recurse ) ; } Target target = new Target ( ) ; if ( useUrl && node != null ) { target . setStartNode ( node ) ; } target . setContext ( context ) ; target . setRecurse ( recurse ) ; switch ( Control . getSingleton ( ) . getMode ( ) ) { case safe : throw new ApiException ( ApiException . Type . MODE_VIOLATION ) ; case protect : if ( ( useUrl && ! Model . getSingleton ( ) . getSession ( ) . isInScope ( url ) ) || ( context != null && ! context . isInScope ( ) ) ) { throw new ApiException ( ApiException . Type . MODE_VIOLATION ) ; } // No problem break ; case standard : // No problem break ; case attack : // No problem break ; } List < Object > objs = new ArrayList < > ( 4 ) ; if ( startURI != null ) { objs . add ( startURI ) ; if ( subtreeOnly ) { objs . add ( new HttpPrefixFetchFilter ( startURI ) ) ; } } if ( maxChildren > 0 ) { // Add the filters to filter on maximum number of children MaxChildrenFetchFilter maxChildrenFetchFilter = new MaxChildrenFetchFilter ( ) ; maxChildrenFetchFilter . setMaxChildren ( maxChildren ) ; maxChildrenFetchFilter . 
setModel ( extension . getModel ( ) ) ; MaxChildrenParseFilter maxChildrenParseFilter = new MaxChildrenParseFilter ( extension . getMessages ( ) ) ; maxChildrenParseFilter . setMaxChildren ( maxChildren ) ; maxChildrenParseFilter . setModel ( extension . getModel ( ) ) ; objs . add ( maxChildrenFetchFilter ) ; objs . add ( maxChildrenParseFilter ) ; } return extension . startScan ( target , user , objs . toArray ( new Object [ objs . size ( ) ] ) ) ;
public class ArteVideoDetailsDeserializer { /** * liefert die erste Ausstrahlung des Typs ohne Berücksichtigung der CatchupRights */ private static String getBroadcastDateIgnoringCatchupRights ( JsonArray broadcastArray , String broadcastType ) { } }
String broadcastDate = "" ; for ( int i = 0 ; i < broadcastArray . size ( ) ; i ++ ) { JsonObject broadcastObject = broadcastArray . get ( i ) . getAsJsonObject ( ) ; if ( broadcastObject . has ( JSON_ELEMENT_BROADCASTTYPE ) && broadcastObject . has ( JSON_ELEMENT_BROADCAST ) ) { String type = broadcastObject . get ( JSON_ELEMENT_BROADCASTTYPE ) . getAsString ( ) ; if ( type . equals ( broadcastType ) ) { if ( ! broadcastObject . get ( JSON_ELEMENT_BROADCAST ) . isJsonNull ( ) ) { broadcastDate = ( broadcastObject . get ( JSON_ELEMENT_BROADCAST ) . getAsString ( ) ) ; } } } } return broadcastDate ;
public class UserLoginModule { /** * Abort the login . * @ return true if abort was successful */ public final boolean abort ( ) { } }
if (UserLoginModule.LOG.isDebugEnabled()) {
    UserLoginModule.LOG.debug("Abort of " + this.principal);
}
// No principal was established during login(): nothing to abort.
if (this.principal == null) {
    return false;
}
// Roll back a commit() that already published the principal to the subject.
if (this.committed) {
    this.subject.getPrincipals().remove(this.principal);
}
this.committed = false;
this.principal = null;
return true;
public class SecurityGroupUtils { /** * Provides a quick answer to whether a security group exists . * @ param ec2 * the EC2 client to use for making service requests * @ param securityGroupName * the name of the security group being queried * @ throws AmazonClientException * If any internal errors are encountered inside the client * while attempting to make the request or handle the response . * For example if a network connection is not available . * @ throws AmazonServiceException * If an error response is returned by AmazonEC2 indicating * either a problem with the data in the request , or a server * side issue . */ public static boolean doesSecurityGroupExist ( AmazonEC2 ec2 , String securityGroupName ) throws AmazonClientException , AmazonServiceException { } }
// Describe the group by name: a successful call means it exists; the service
// signals a missing group through the InvalidGroup.NotFound error code.
try {
    ec2.describeSecurityGroups(new DescribeSecurityGroupsRequest().withGroupNames(securityGroupName));
    return true;
} catch (AmazonServiceException e) {
    if (!INVALID_GROUP_NOT_FOUND.equals(e.getErrorCode())) {
        // Any other service error is a genuine failure — propagate it.
        throw e;
    }
    return false;
}
public class XSLTAttributeDef { /** * Process an attribute string of type T _ QNAMES into a vector of QNames where * the specification requires that non - prefixed elements not be placed in a * namespace . ( See section 2.4 of XSLT 1.0 . ) * @ param handler non - null reference to current StylesheetHandler that is constructing the Templates . * @ param uri The Namespace URI , or an empty string . * @ param name The local name ( without prefix ) , or empty string if not namespace processing . * @ param rawName The qualified name ( with prefix ) . * @ param value A whitespace delimited list of qualified names . * @ return a Vector of QName objects . * @ throws org . xml . sax . SAXException if the one of the qualified name strings * contains a prefix that can not be * resolved , or a qualified name contains syntax that is invalid for a qualified name . */ Vector processQNAMES ( StylesheetHandler handler , String uri , String name , String rawName , String value ) throws org . xml . sax . SAXException { } }
StringTokenizer tokenizer = new StringTokenizer(value, " \t\n\r\f");
// Presize the vector to the number of whitespace-separated tokens.
Vector qnames = new Vector(tokenizer.countTokens());
// Each token becomes a QName resolved against the handler's namespace context.
// (Fix from Alexander Rudnev)
while (tokenizer.hasMoreTokens()) {
    qnames.addElement(new QName(tokenizer.nextToken(), handler));
}
return qnames;
public class ServletHandler { /** * Get context attribute names . * Combines ServletHandler and HttpContext attributes . */ protected Enumeration getContextAttributeNames ( ) { } }
// Fast path: no local attributes, so the context's names are the whole answer.
if (_attributes.size() == 0)
    return getHttpContext().getAttributeNames();
// Merge local attribute names with the context's; the set removes duplicates.
HashSet merged = new HashSet(_attributes.keySet());
for (Enumeration e = getHttpContext().getAttributeNames(); e.hasMoreElements();) {
    merged.add(e.nextElement());
}
return Collections.enumeration(merged);
public class PluginConfigurationReaderImpl { /** * Read the catalogs from an { @ link URL } . * @ param pluginUrl The { @ link URL } . * @ return The { @ link JqassistantPlugin } . */ private JqassistantPlugin readPlugin ( URL pluginUrl ) { } }
// Stream is closed automatically by try-with-resources.
try (InputStream in = new BufferedInputStream(pluginUrl.openStream())) {
    return jaxbUnmarshaller.unmarshal(in);
} catch (IOException cause) {
    // Wrap as unchecked: an unreadable plugin descriptor is a deployment error.
    throw new IllegalStateException("Cannot read plugin from " + pluginUrl.toString(), cause);
}
public class RequestHelper { /** * Get the user agent from the given request . * @ param aHttpRequest * The HTTP request to get the UA from . * @ return < code > null < / code > if no user agent string is present */ @ Nullable public static String getHttpUserAgentStringFromRequest ( @ Nonnull final HttpServletRequest aHttpRequest ) { } }
// Use non - standard headers first String sUserAgent = aHttpRequest . getHeader ( CHttpHeader . UA ) ; if ( sUserAgent == null ) { sUserAgent = aHttpRequest . getHeader ( CHttpHeader . X_DEVICE_USER_AGENT ) ; if ( sUserAgent == null ) sUserAgent = aHttpRequest . getHeader ( CHttpHeader . USER_AGENT ) ; } return sUserAgent ;
public class BeanProperty { /** * Sets the property value on the object . Attempts to first set the * property via its setter method such as " setFirstName ( ) " . If a setter * method doesn ' t exist , this will then attempt to set the property value * directly on the underlying field within the class . * < br > * NOTE : If the setter method throws an exception during execution , the * exception will be accessible in the getCause ( ) method of the * InvocationTargetException . * @ param obj The object to set the property on * @ param value The value of the property * @ throws java . lang . IllegalAccessException Thrown if an access exception * occurs while attempting to set the property value . * @ throws java . lang . reflect . InvocationTargetException Thrown if there * is an exception thrown while calling the underlying method . */ public void set ( Object obj , Object value ) throws IllegalAccessException , InvocationTargetException { } }
// always try the " setMethod " first if ( setMethod != null ) { setMethod . invoke ( obj , value ) ; // fall back to setting the field directly } else if ( field != null ) { field . set ( obj , value ) ; } else { throw new IllegalAccessException ( "Cannot set property value" ) ; }
public class AbstractIntDoubleMap { /** * Assigns the result of a function to each value ; < tt > v [ i ] = function ( v [ i ] ) < / tt > . * @ param function a function object taking as argument the current association ' s value . */ public void assign ( final cern . colt . function . DoubleFunction function ) { } }
// Procedure that writes the transformed value back into this map.
final cern.colt.function.IntDoubleProcedure applier = new cern.colt.function.IntDoubleProcedure() {
    public boolean apply(int key, double value) {
        put(key, function.apply(value));
        return true; // keep iterating
    }
};
// Iterate over a copy so the puts above cannot disturb the traversal.
copy().forEachPair(applier);
public class AbstractDataDistributionType { /** * { @ inheritDoc } */ public void removeDataNode ( Node rootNode , String dataId ) throws RepositoryException { } }
Node parentNode = null;
try {
    // Locate the node for this data id, remove it, and persist via its parent.
    Node node = getDataNode(rootNode, dataId);
    parentNode = node.getParent();
    node.remove();
    parentNode.save();
} catch (InvalidItemStateException e) {
    // Concurrent modification detected on save: trace it, then discard our
    // pending changes so the session returns to a consistent state.
    if (LOG.isTraceEnabled()) {
        LOG.trace("An exception occurred: " + e.getMessage());
    }
    if (parentNode != null)
        parentNode.refresh(false);
} catch (PathNotFoundException e) {
    // Node already gone: removal is treated as idempotent, just trace and return.
    if (LOG.isTraceEnabled()) {
        LOG.trace("An exception occurred: " + e.getMessage());
    }
}