signature stringlengths 43 39.1k | implementation stringlengths 0 450k |
|---|---|
public class RectangleConstraintSolver { /** * Output a Gnuplot - readable script that draws , for each given
* { @ link RectangularRegion } , a rectangle which is close to the
* " center " of the { @ link RectangularRegion } ' s domain .
* @ param horizon The maximum X and Y coordinate to be used in the plot .
* @ param rect The set of { @ link RectangularRegion } s to draw .
* @ return A Gnuplot script . */
public String drawAlmostCentreRectangle ( long horizon , HashMap < String , Rectangle > rect ) { } } | String ret = "" ; int j = 1 ; ret = "set xrange [0:" + horizon + "]" + "\n" ; ret += "set yrange [0:" + horizon + "]" + "\n" ; int i = 0 ; for ( String str : rect . keySet ( ) ) { // rec
ret += "set obj " + j + " rect from " + rect . get ( str ) . getMinX ( ) + "," + rect . get ( str ) . getMinY ( ) + " to " + rect . get ( str ) . getMaxX ( ) + "," + rect . get ( str ) . getMaxY ( ) + " front fs transparent solid 0.0 border " + ( i + 1 ) + " lw 2" + "\n" ; j ++ ; // label of centre Rec
ret += "set label " + "\"" + str + "\"" + " at " + rect . get ( str ) . getCenterX ( ) + "," + rect . get ( str ) . getCenterY ( ) + " textcolor lt " + ( i + 1 ) + " font \"9\"" + "\n" ; j ++ ; i ++ ; } ret += "plot " + "NaN" + "\n" ; ret += "pause -1" ; return ret ; |
public class GeoJsonReaderDriver { /** * Parses a GeoJSON geometry and returns its JTS representation .
* Syntax :
* " geometry " : { " type " : " Point " , " coordinates " : [ 102.0,0.5 ] }
* @ param jp
* @ throws IOException
* @ return Geometry */
private Geometry parseGeometry ( JsonParser jp , String geometryType ) throws IOException , SQLException { } } | if ( geometryType . equalsIgnoreCase ( GeoJsonField . POINT ) ) { return parsePoint ( jp ) ; } else if ( geometryType . equalsIgnoreCase ( GeoJsonField . MULTIPOINT ) ) { return parseMultiPoint ( jp ) ; } else if ( geometryType . equalsIgnoreCase ( GeoJsonField . LINESTRING ) ) { return parseLinestring ( jp ) ; } else if ( geometryType . equalsIgnoreCase ( GeoJsonField . MULTILINESTRING ) ) { return parseMultiLinestring ( jp ) ; } else if ( geometryType . equalsIgnoreCase ( GeoJsonField . POLYGON ) ) { return parsePolygon ( jp ) ; } else if ( geometryType . equalsIgnoreCase ( GeoJsonField . MULTIPOLYGON ) ) { return parseMultiPolygon ( jp ) ; } else if ( geometryType . equalsIgnoreCase ( GeoJsonField . GEOMETRYCOLLECTION ) ) { return parseGeometryCollection ( jp ) ; } else { throw new SQLException ( "Unsupported geometry : " + geometryType ) ; } |
public class RepositoryApplicationConfiguration { /** * { @ link DefaultRolloutApprovalStrategy } bean .
* @ return a new { @ link RolloutApprovalStrategy } */
@ Bean @ ConditionalOnMissingBean RolloutApprovalStrategy rolloutApprovalStrategy ( final UserDetailsService userDetailsService , final TenantConfigurationManagement tenantConfigurationManagement , final SystemSecurityContext systemSecurityContext ) { } } | return new DefaultRolloutApprovalStrategy ( userDetailsService , tenantConfigurationManagement , systemSecurityContext ) ; |
public class TypeDef { /** * The method adapts constructor method to the current class . It unsets any name that may be
* presetn in the method . It also sets as a return type a reference to the current type . */
private static List < Method > adaptConstructors ( List < Method > methods , TypeDef target ) { } } | List < Method > adapted = new ArrayList < Method > ( ) ; for ( Method m : methods ) { adapted . add ( new MethodBuilder ( m ) . withName ( null ) . withReturnType ( target . toUnboundedReference ( ) ) . build ( ) ) ; } return adapted ; |
public class AbstractExpectationManager { /** * The resulting future will only return after such a message is received
* that makes the condition true .
* @ param condition
* Condition to be true .
* @ return The future . */
public synchronized Future < Message > setExpectation ( final C condition , final MessageAction < P > action ) { } } | if ( this . isStopped ( ) ) { throw new IllegalStateException ( "Already stopped." ) ; } final AbstractExpectation < C , P > expectation = this . createExpectation ( condition , action ) ; final Future < Message > future = AbstractExpectationManager . EXECUTOR . submit ( expectation ) ; this . expectations . put ( expectation , future ) ; AbstractExpectationManager . LOGGER . info ( "Registered expectation {} with action {}." , expectation , action ) ; return future ; |
public class nspbr6 { /** * Use this API to add nspbr6. */
public static base_response add ( nitro_service client , nspbr6 resource ) throws Exception { } } | nspbr6 addresource = new nspbr6 ( ) ; addresource . name = resource . name ; addresource . td = resource . td ; addresource . action = resource . action ; addresource . srcipv6 = resource . srcipv6 ; addresource . srcipop = resource . srcipop ; addresource . srcipv6val = resource . srcipv6val ; addresource . srcport = resource . srcport ; addresource . srcportop = resource . srcportop ; addresource . srcportval = resource . srcportval ; addresource . destipv6 = resource . destipv6 ; addresource . destipop = resource . destipop ; addresource . destipv6val = resource . destipv6val ; addresource . destport = resource . destport ; addresource . destportop = resource . destportop ; addresource . destportval = resource . destportval ; addresource . srcmac = resource . srcmac ; addresource . protocol = resource . protocol ; addresource . protocolnumber = resource . protocolnumber ; addresource . vlan = resource . vlan ; addresource . Interface = resource . Interface ; addresource . priority = resource . priority ; addresource . state = resource . state ; addresource . msr = resource . msr ; addresource . monitor = resource . monitor ; addresource . nexthop = resource . nexthop ; addresource . nexthopval = resource . nexthopval ; addresource . nexthopvlan = resource . nexthopvlan ; return addresource . add_resource ( client ) ; |
public class PageBase { /** * $ NON - NLS - 1 $ */
@ Override public ObservableList < Item > getPropertySheetItems ( ) { } } | ObservableList < Item > items = super . getPropertySheetItems ( ) ; items . add ( new Item ( ) { @ Override public Optional < ObservableValue < ? > > getObservableValue ( ) { return Optional . of ( showNavigationProperty ( ) ) ; } @ Override public void setValue ( Object value ) { setShowNavigation ( ( boolean ) value ) ; } @ Override public Object getValue ( ) { return isShowNavigation ( ) ; } @ Override public Class < ? > getType ( ) { return Boolean . class ; } @ Override public String getName ( ) { return "Show Navigation" ; // $ NON - NLS - 1 $
} @ Override public String getDescription ( ) { return "Navigation controls (back, forward, today)" ; // $ NON - NLS - 1 $
} @ Override public String getCategory ( ) { return PAGE_BASE_CATEGORY ; } } ) ; items . add ( new Item ( ) { @ Override public Optional < ObservableValue < ? > > getObservableValue ( ) { return Optional . of ( showDateProperty ( ) ) ; } @ Override public void setValue ( Object value ) { setShowDate ( ( boolean ) value ) ; } @ Override public Object getValue ( ) { return isShowDateHeader ( ) ; } @ Override public Class < ? > getType ( ) { return Boolean . class ; } @ Override public String getName ( ) { return "Show Date" ; // $ NON - NLS - 1 $
} @ Override public String getDescription ( ) { return "Header with current month, day, or year." ; // $ NON - NLS - 1 $
} @ Override public String getCategory ( ) { return PAGE_BASE_CATEGORY ; } } ) ; items . add ( new Item ( ) { @ Override public Optional < ObservableValue < ? > > getObservableValue ( ) { return Optional . of ( dateTimeFormatterProperty ( ) ) ; } @ Override public void setValue ( Object value ) { setDateTimeFormatter ( ( DateTimeFormatter ) value ) ; } @ Override public Object getValue ( ) { return getDateTimeFormatter ( ) ; } @ Override public Class < ? > getType ( ) { return DateTimeFormatter . class ; } @ Override public String getName ( ) { return "Date Time Formatter" ; // $ NON - NLS - 1 $
} @ Override public String getDescription ( ) { return "Date time formatter" ; // $ NON - NLS - 1 $
} @ Override public String getCategory ( ) { return PAGE_BASE_CATEGORY ; } } ) ; items . add ( new Item ( ) { @ Override public Optional < ObservableValue < ? > > getObservableValue ( ) { return Optional . of ( hiddenProperty ( ) ) ; } @ Override public void setValue ( Object value ) { setHidden ( ( boolean ) value ) ; } @ Override public Object getValue ( ) { return isHidden ( ) ; } @ Override public Class < ? > getType ( ) { return Boolean . class ; } @ Override public String getName ( ) { return "Hidden" ; // $ NON - NLS - 1 $
} @ Override public String getDescription ( ) { return "Hides the page from the user." ; // $ NON - NLS - 1 $
} @ Override public String getCategory ( ) { return PAGE_BASE_CATEGORY ; } } ) ; return items ; |
public class ProcessUtils { /** * 拷贝单个对象
* @ param r 目标对象
* @ param src 原对象
* @ param < T > 原数据类型
* @ param < R > 目标数据类型 */
public static < T , R > void processObject ( R r , T src ) { } } | processObject ( r , src , ( r1 , src1 ) -> { } ) ; |
public class BasicAgigaSentence { /** * / * ( non - Javadoc )
* @ see edu . jhu . hltcoe . sp . data . depparse . AgigaSentence # getParseText ( ) */
public String getParseText ( ) { } } | if ( parseTextFixedCache != null ) return parseTextFixedCache ; // check for leaves that have a space in them
// replace any spaces with non - breaking spaces 0xa0
final String nbsp = "\u00A0" ; // want to find terminals with more than one token
// leaves are not recursive , so its ok to use a regular expression
Matcher m = multiWordTerminalPat . matcher ( parseText ) ; int ptr = 0 ; StringBuilder sb = null ; while ( m . find ( ) ) { if ( sb == null ) sb = new StringBuilder ( ) ; int s = m . start ( ) ; int e = m . end ( ) ; // before match
sb . append ( parseText . substring ( ptr , s ) ) ; // match itself
String mid = parseText . substring ( s , e ) ; int mid_space = mid . indexOf ( " " ) ; sb . append ( mid . substring ( 0 , mid_space + 1 ) ) ; sb . append ( mid . substring ( mid_space + 1 ) . replaceAll ( "\\s+" , nbsp ) ) ; ptr = e ; } if ( sb == null ) { parseTextFixedCache = parseText ; } else { sb . append ( parseText . substring ( ptr ) ) ; parseTextFixedCache = sb . toString ( ) ; } return parseTextFixedCache ; |
public class DataIO { /** * Writes UTF - 8 encoded characters to the given stream , but does not write
* the length .
* @ param workspace temporary buffer to store characters in */
public static final void writeUTF ( OutputStream out , String str , char [ ] workspace ) throws IOException { } } | writeUTF ( out , str , 0 , str . length ( ) , workspace ) ; |
public class PnPInfinitesimalPlanePoseEstimation { /** * Computes reprojection error to select best model */
double computeError ( List < AssociatedPair > points , Se3_F64 worldToCamera ) { } } | double error = 0 ; for ( int i = 0 ; i < points . size ( ) ; i ++ ) { AssociatedPair pair = points . get ( i ) ; tmpP . set ( pair . p1 . x , pair . p1 . y , 0 ) ; SePointOps_F64 . transform ( worldToCamera , tmpP , tmpP ) ; error += pair . p2 . distance2 ( tmpP . x / tmpP . z , tmpP . y / tmpP . z ) ; } return Math . sqrt ( error / points . size ( ) ) ; |
public class JunctionTree { /** * Compute the message that gets passed from startFactor to destFactor . */
private void passMessage ( CliqueTree cliqueTree , int startFactor , int destFactor , boolean useSumProduct ) { } } | VariableNumMap sharedVars = cliqueTree . getFactor ( startFactor ) . getVars ( ) . intersection ( cliqueTree . getFactor ( destFactor ) . getVars ( ) ) ; // Find the factors which have yet to be merged into the marginal
// distribution of factor , but are necessary for computing the
// specified message .
Set < Integer > factorIndicesToCombine = Sets . newHashSet ( cliqueTree . getNeighboringFactors ( startFactor ) ) ; factorIndicesToCombine . removeAll ( cliqueTree . getFactorsInMarginal ( startFactor ) ) ; // If this is the upstream round of message passing , we might not have
// received a message from destFactor yet . However , if we have received the
// message , we should include it in the product as it will increase sparsity
// and thereby improve efficiency .
if ( cliqueTree . getMessage ( destFactor , startFactor ) == null ) { factorIndicesToCombine . remove ( destFactor ) ; } List < Factor > factorsToCombine = new ArrayList < Factor > ( ) ; for ( Integer adjacentFactorNum : factorIndicesToCombine ) { factorsToCombine . add ( cliqueTree . getMessage ( adjacentFactorNum , startFactor ) ) ; } // Update the marginal distribution of startFactor in the clique tree .
Factor updatedMarginal = cliqueTree . getMarginal ( startFactor ) . product ( factorsToCombine ) ; if ( cliqueTree . getOutboundFactors ( startFactor ) . size ( ) == 0 ) { // If this factor has yet to send any outbound messages , we can
// use it to renormalize the probability distribution to avoid
// possible numerical overflow issues . Updating the marginal
// at this point is equivalent to multiplying the original factor
// by some constant value , which doesn ' t affect the probability
// distribution .
if ( renormalize ) { updatedMarginal = updatedMarginal . product ( 1.0 / updatedMarginal . getTotalUnnormalizedProbability ( ) ) ; } // Can also prune this marginal if a pruning strategy was provided .
if ( pruningStrategy != null ) { updatedMarginal = pruningStrategy . apply ( updatedMarginal ) ; } } cliqueTree . setMarginal ( startFactor , updatedMarginal ) ; cliqueTree . addFactorsToMarginal ( startFactor , factorIndicesToCombine ) ; // The message from startFactor to destFactor is the marginal of
// productFactor , divided by the message from destFactor to
// startFactor , if it exists .
Factor messageFactor = null ; if ( useSumProduct ) { messageFactor = updatedMarginal . marginalize ( updatedMarginal . getVars ( ) . removeAll ( sharedVars ) . getVariableNums ( ) ) ; } else { messageFactor = updatedMarginal . maxMarginalize ( updatedMarginal . getVars ( ) . removeAll ( sharedVars ) . getVariableNums ( ) ) ; } // Divide out the destFactor - > startFactor message if necessary .
if ( cliqueTree . getFactorsInMarginal ( startFactor ) . contains ( destFactor ) ) { messageFactor = messageFactor . product ( cliqueTree . getMessage ( destFactor , startFactor ) . inverse ( ) ) ; } cliqueTree . addMessage ( startFactor , destFactor , messageFactor ) ; |
public class OdsElements { /** * Write the settings element to a writer .
* @ param xmlUtil the xml util
* @ param writer the writer
* @ throws IOException if write fails */
public void writeSettings ( final XMLUtil xmlUtil , final ZipUTF8Writer writer ) throws IOException { } } | this . settingsElement . setTables ( this . getTables ( ) ) ; this . logger . log ( Level . FINER , "Writing odselement: settingsElement to zip file" ) ; this . settingsElement . write ( xmlUtil , writer ) ; |
public class RepositoryOptionImpl { /** * Returns the full repository url .
* @ return the full repository as given plus eventual snapshot / release tags ( cannot be null or empty )
* @ throws IllegalStateException - if both snapshots and releases are not allowed */
public String getRepository ( ) { } } | if ( ! m_allowReleases && ! m_allowSnapshots ) { throw new IllegalStateException ( "Does not make sense to disallow both releases and snapshots." ) ; } final StringBuilder repositoryUrl = new StringBuilder ( ) ; repositoryUrl . append ( m_repositoryUrl ) ; if ( m_allowSnapshots ) { repositoryUrl . append ( "@snapshots" ) ; } if ( ! m_allowReleases ) { repositoryUrl . append ( "@noreleases" ) ; } return repositoryUrl . toString ( ) ; |
public class Utils { /** * Copies the original byte array content to a new byte array . The resulting byte array is always
* " length " size . If length is smaller than the original byte array , the resulting byte array is
* truncated . If length is bigger than the original byte array , the resulting byte array is filled
* with zero bytes .
* @ param orig the original byte array
* @ param length how big the resulting byte array will be
* @ return the copied byte array */
public static byte [ ] copyWithLength ( byte [ ] orig , int length ) { } } | // No need to initialize with zero bytes , because the bytes are already initialized with that
byte [ ] result = new byte [ length ] ; int howMuchToCopy = length < orig . length ? length : orig . length ; System . arraycopy ( orig , 0 , result , 0 , howMuchToCopy ) ; return result ; |
public class Exhibitor { /** * Return this VM ' s hostname if possible
* @ return hostname */
public static String getHostname ( ) { } } | String host = "unknown" ; try { return InetAddress . getLocalHost ( ) . getHostName ( ) ; } catch ( UnknownHostException e ) { // ignore
} return host ; |
public class BinaryExpressionEvaluator { /** * end numeric */
@ Override public Value caseAPlusPlusBinaryExp ( APlusPlusBinaryExp node , Context ctxt ) throws AnalysisException { } } | // breakpoint . check ( location , ctxt ) ;
node . getLocation ( ) . hit ( ) ; // Mark as covered
try { Value lv = node . getLeft ( ) . apply ( VdmRuntime . getExpressionEvaluator ( ) , ctxt ) . deref ( ) ; Value rv = node . getRight ( ) . apply ( VdmRuntime . getExpressionEvaluator ( ) , ctxt ) ; if ( lv instanceof MapValue ) { ValueMap lm = new ValueMap ( lv . mapValue ( ctxt ) ) ; ValueMap rm = rv . mapValue ( ctxt ) ; for ( Value k : rm . keySet ( ) ) { lm . put ( k , rm . get ( k ) ) ; } return new MapValue ( lm ) ; } else { ValueList seq = lv . seqValue ( ctxt ) ; ValueMap map = rv . mapValue ( ctxt ) ; ValueList result = new ValueList ( seq ) ; for ( Value k : map . keySet ( ) ) { int iv = ( int ) k . intValue ( ctxt ) ; if ( iv < 1 || iv > seq . size ( ) ) { VdmRuntimeError . abort ( node . getLocation ( ) , 4025 , "Map key not within sequence index range: " + k , ctxt ) ; } result . set ( iv - 1 , map . get ( k ) ) ; } return new SeqValue ( result ) ; } } catch ( ValueException e ) { return VdmRuntimeError . abort ( node . getLocation ( ) , e ) ; } |
public class InterceptionDecorationContext { /** * Peeks the current top of the stack or returns null if the stack is empty
* @ return the current top of the stack or returns null if the stack is empty */
public static CombinedInterceptorAndDecoratorStackMethodHandler peekIfNotEmpty ( ) { } } | Stack stack = interceptionContexts . get ( ) ; if ( stack == null ) { return null ; } return stack . peek ( ) ; |
public class AmqpProperties { /** * Sets the value of " type " property . If a null value is passed
* in , it indicates that the property is not set .
* @ param type value of " type " property */
public void setType ( String type ) { } } | if ( type == null ) { _properties . remove ( AMQP_PROP_TYPE ) ; return ; } _properties . put ( AMQP_PROP_TYPE , type ) ; |
public class AppServiceEnvironmentsInner { /** * List all currently running operations on the App Service Environment .
* List all currently running operations on the App Service Environment .
* @ param resourceGroupName Name of the resource group to which the resource belongs .
* @ param name Name of the App Service Environment .
* @ throws IllegalArgumentException thrown if parameters fail the validation
* @ return the observable to the List & lt ; OperationInner & gt ; object */
public Observable < List < OperationInner > > listOperationsAsync ( String resourceGroupName , String name ) { } } | return listOperationsWithServiceResponseAsync ( resourceGroupName , name ) . map ( new Func1 < ServiceResponse < List < OperationInner > > , List < OperationInner > > ( ) { @ Override public List < OperationInner > call ( ServiceResponse < List < OperationInner > > response ) { return response . body ( ) ; } } ) ; |
public class DataService { /** * Method to populate the QueryResults hash map by reading the key from QueryResult entities
* @ param queryResults
* the queryResults hash map to be populated
* @ param queryResult
* the QueryResult object */
private void populateQueryResultsInCDC ( Map < String , QueryResult > queryResults , QueryResult queryResult ) { } } | if ( queryResult != null ) { List < ? extends IEntity > entities = queryResult . getEntities ( ) ; if ( entities != null && ! entities . isEmpty ( ) ) { IEntity entity = entities . get ( 0 ) ; String entityName = entity . getClass ( ) . getSimpleName ( ) ; queryResults . put ( entityName , queryResult ) ; } } |
public class MultiPointerGestureDetector { /** * Resets the component to the initial state . */
public void reset ( ) { } } | mGestureInProgress = false ; mPointerCount = 0 ; for ( int i = 0 ; i < MAX_POINTERS ; i ++ ) { mId [ i ] = MotionEvent . INVALID_POINTER_ID ; } |
public class FieldsAndGetters { /** * Creates an immutable Map . Entry . */
private static < K , V > Map . Entry < K , V > createEntry ( K key , V value ) { } } | return new Map . Entry < K , V > ( ) { @ Override public K getKey ( ) { return key ; } @ Override public V getValue ( ) { return value ; } @ Override public V setValue ( V value ) { throw new UnsupportedOperationException ( ) ; } } ; |
public class RESTMBeanServerConnection { /** * { @ inheritDoc } */
@ Override public Object invoke ( ObjectName name , String operationName , Object [ ] params , String [ ] signature ) throws InstanceNotFoundException , MBeanException , ReflectionException , IOException { } } | final String sourceMethod = "invoke" ; try { // Look for routing MBean
if ( ClientProvider . CONNECTION_ROUTING_NAME . equals ( name . getKeyProperty ( "name" ) ) && ClientProvider . CONNECTION_ROUTING_DOMAIN . equals ( name . getDomain ( ) ) ) { // Handle server - level routing
if ( ClientProvider . CONNECTION_ROUTING_OPERATION_ASSIGN_SERVER . equals ( operationName ) ) { if ( params . length == 3 ) { // routing at server level
this . mapRouting = new HashMap < String , Object > ( ) ; this . mapRouting . put ( ClientProvider . ROUTING_KEY_HOST_NAME , params [ 0 ] ) ; this . mapRouting . put ( ClientProvider . ROUTING_KEY_SERVER_USER_DIR , params [ 1 ] ) ; this . mapRouting . put ( ClientProvider . ROUTING_KEY_SERVER_NAME , params [ 2 ] ) ; return Boolean . TRUE ; } // Handle host - level routing
} else if ( ClientProvider . CONNECTION_ROUTING_OPERATION_ASSIGN_HOST . equals ( operationName ) ) { if ( params . length == 1 ) { // routing at host level
this . mapRouting = new HashMap < String , Object > ( ) ; this . mapRouting . put ( ClientProvider . ROUTING_KEY_HOST_NAME , params [ 0 ] ) ; return Boolean . TRUE ; } } } } catch ( Exception e ) { throw new MBeanException ( e ) ; } checkConnection ( ) ; // Special handling for file transfer MBean invocations
if ( ClientProvider . FILE_TRANSFER_NAME . equals ( name . getKeyProperty ( "name" ) ) && ClientProvider . FILE_TRANSFER_DOMAIN . equals ( name . getDomain ( ) ) ) { return fileTransferClient . handleOperation ( operationName , params ) ; } URL invokeURL = null ; HttpsURLConnection connection = null ; JSONConverter converter = JSONConverter . getConverter ( ) ; try { // Get URL for invoke operation
invokeURL = getOperationURL ( name , operationName ) ; // Get connection to server
connection = getConnection ( invokeURL , HttpMethod . POST ) ; // Create Invocation object
Invocation invocation = new Invocation ( ) ; invocation . params = params ; invocation . signature = signature ; // Write Invocation JSON to connection output stream
OutputStream output = connection . getOutputStream ( ) ; converter . writeInvocation ( output , invocation ) ; output . flush ( ) ; output . close ( ) ; } catch ( ConnectException ce ) { recoverConnection ( ce ) ; // Server is down ; not a client bug
throw ce ; } catch ( IntrospectionException intro ) { throw getRequestErrorException ( sourceMethod , intro , invokeURL ) ; } catch ( IOException io ) { throw getRequestErrorException ( sourceMethod , io , invokeURL ) ; } finally { JSONConverter . returnConverter ( converter ) ; } // Check response code from server
int responseCode = 0 ; try { responseCode = connection . getResponseCode ( ) ; } catch ( ConnectException ce ) { recoverConnection ( ce ) ; // Server is down ; not a client bug
throw ce ; } switch ( responseCode ) { case HttpURLConnection . HTTP_OK : converter = JSONConverter . getConverter ( ) ; try { // Process and return server response , which should be a POJO
return converter . readPOJO ( connection . getInputStream ( ) ) ; } catch ( ClassNotFoundException cnf ) { // Not a REST connector bug per se ; not need to log this case
throw new IOException ( RESTClientMessagesUtil . getMessage ( RESTClientMessagesUtil . SERVER_RESULT_EXCEPTION ) , cnf ) ; } catch ( Exception e ) { throw getResponseErrorException ( sourceMethod , e , invokeURL ) ; } finally { JSONConverter . returnConverter ( converter ) ; } case HttpURLConnection . HTTP_BAD_REQUEST : case HttpURLConnection . HTTP_INTERNAL_ERROR : try { // Server response should be a serialized Throwable
throw getServerThrowable ( sourceMethod , connection ) ; } catch ( InstanceNotFoundException inf ) { throw inf ; } catch ( RuntimeMBeanException rme ) { throw rme ; } catch ( MBeanException me ) { throw me ; } catch ( ReflectionException re ) { throw re ; } catch ( IOException io ) { throw io ; } catch ( Throwable t ) { throw new IOException ( RESTClientMessagesUtil . getMessage ( RESTClientMessagesUtil . UNEXPECTED_SERVER_THROWABLE ) , t ) ; } case HttpURLConnection . HTTP_UNAUTHORIZED : case HttpURLConnection . HTTP_FORBIDDEN : throw getBadCredentialsException ( responseCode , connection ) ; case HttpURLConnection . HTTP_GONE : case HttpURLConnection . HTTP_NOT_FOUND : IOException ioe = getResponseCodeErrorException ( sourceMethod , responseCode , connection ) ; recoverConnection ( ioe ) ; throw ioe ; default : IOException e = getResponseCodeErrorException ( sourceMethod , responseCode , connection ) ; throw e ; } |
public class DetourCommon { /** * / @ param [ in ] t The interpolation factor . [ Limits : 0 < = value < = 1.0] */
public static float [ ] vLerp ( float [ ] verts , int v1 , int v2 , float t ) { } } | float [ ] dest = new float [ 3 ] ; dest [ 0 ] = verts [ v1 + 0 ] + ( verts [ v2 + 0 ] - verts [ v1 + 0 ] ) * t ; dest [ 1 ] = verts [ v1 + 1 ] + ( verts [ v2 + 1 ] - verts [ v1 + 1 ] ) * t ; dest [ 2 ] = verts [ v1 + 2 ] + ( verts [ v2 + 2 ] - verts [ v1 + 2 ] ) * t ; return dest ; |
public class InstanceTarget { /** * The lifecycle events of the deployment to this target instance .
* @ return The lifecycle events of the deployment to this target instance . */
public java . util . List < LifecycleEvent > getLifecycleEvents ( ) { } } | if ( lifecycleEvents == null ) { lifecycleEvents = new com . amazonaws . internal . SdkInternalList < LifecycleEvent > ( ) ; } return lifecycleEvents ; |
public class StatementManager { /** * Removes one ( or all ) of the links between a session and a compiled
* statement . If the statement is not linked with any other session , it is
* removed from management .
* @ param csid the compiled statment identifier
* @ param sessionID the session identifier
* @ param freeAll if true , remove all links to the session */
synchronized void freeStatement ( long csid , long sessionID , boolean freeAll ) { } } | if ( csid == - 1 ) { // statement was never added
return ; } LongKeyIntValueHashMap scsMap = ( LongKeyIntValueHashMap ) sessionUseMap . get ( sessionID ) ; if ( scsMap == null ) { // statement already removed due to invalidation
return ; } int sessionUseCount = scsMap . get ( csid , 0 ) ; if ( sessionUseCount == 0 ) { // statement already removed due to invalidation
} else if ( sessionUseCount == 1 || freeAll ) { scsMap . remove ( csid ) ; int usecount = useMap . get ( csid , 0 ) ; if ( usecount == 0 ) { // statement already removed due to invalidation
} else if ( usecount == 1 ) { Statement cs = ( Statement ) csidMap . remove ( csid ) ; if ( cs != null ) { int schemaid = cs . getSchemaName ( ) . hashCode ( ) ; LongValueHashMap sqlMap = ( LongValueHashMap ) schemaMap . get ( schemaid ) ; String sql = ( String ) sqlLookup . remove ( csid ) ; sqlMap . remove ( sql ) ; } useMap . remove ( csid ) ; } else { useMap . put ( csid , usecount - 1 ) ; } } else { scsMap . put ( csid , sessionUseCount - 1 ) ; } |
public class Client { /** * ( non - Javadoc )
* @ see org . restcomm . protocols . ss7 . map . api . MAPDialogListener # onDialogProviderAbort
* ( org . restcomm . protocols . ss7 . map . api . MAPDialog , org . restcomm . protocols . ss7 . map . api . dialog . MAPAbortProviderReason ,
* org . restcomm . protocols . ss7 . map . api . dialog . MAPAbortSource ,
* org . restcomm . protocols . ss7 . map . api . primitives . MAPExtensionContainer ) */
@ Override public void onDialogProviderAbort ( MAPDialog mapDialog , MAPAbortProviderReason abortProviderReason , MAPAbortSource abortSource , MAPExtensionContainer extensionContainer ) { } } | logger . error ( String . format ( "onDialogProviderAbort for DialogId=%d MAPAbortProviderReason=%s MAPAbortSource=%s MAPExtensionContainer=%s" , mapDialog . getLocalDialogId ( ) , abortProviderReason , abortSource , extensionContainer ) ) ; this . csvWriter . incrementCounter ( ERROR_DIALOGS ) ; |
public class SpdySessionHandler { /** * need to synchronize to prevent new streams from being created while updating active streams */
private void updateInitialSendWindowSize ( int newInitialWindowSize ) { } } | int deltaWindowSize = newInitialWindowSize - initialSendWindowSize ; initialSendWindowSize = newInitialWindowSize ; spdySession . updateAllSendWindowSizes ( deltaWindowSize ) ; |
public class ThreadGroup { /** * Adds the specified Thread group to this group .
* @ param g the specified Thread group to be added
* @ exception IllegalThreadStateException If the Thread group has been destroyed . */
private final void add ( ThreadGroup g ) { } } | synchronized ( this ) { if ( destroyed ) { throw new IllegalThreadStateException ( ) ; } if ( groups == null ) { groups = new ThreadGroup [ 4 ] ; } else if ( ngroups == groups . length ) { groups = Arrays . copyOf ( groups , ngroups * 2 ) ; } groups [ ngroups ] = g ; // This is done last so it doesn ' t matter in case the
// thread is killed
ngroups ++ ; } |
public class ConnecResponse { /** * Count ithe number of record returned
* @ return number of records in the response */
public int getCount ( ) { } } | if ( metadata != null && metadata . get ( "count" ) != null ) { return Integer . parseInt ( metadata . get ( "count" ) ) ; } if ( entities == null && entity != null ) { return 1 ; } return 0 ; |
public class TableCellButtonRendererFactory { /** * Factory method for creating the new { @ link TableCellButtonRenderer } with the given string
* @ param text
* the text
* @ return the new { @ link TableCellButtonRenderer } */
public static TableCellButtonRenderer newTableCellButtonRenderer ( String text ) { } } | return new TableCellButtonRenderer ( null , null ) { private static final long serialVersionUID = 1L ; @ Override protected String onSetText ( final Object value ) { String currentText = text ; return currentText ; } } ; |
public class JAASSystem { /** * Returns all cached JAAS system in a set .
* @ return set of all loaded and cached JAAS systems */
public static Set < JAASSystem > getAllJAASSystems ( ) { } } | final Set < JAASSystem > ret = new HashSet < > ( ) ; final Cache < Long , JAASSystem > cache = InfinispanCache . get ( ) . < Long , JAASSystem > getCache ( JAASSystem . IDCACHE ) ; for ( final Map . Entry < Long , JAASSystem > entry : cache . entrySet ( ) ) { ret . add ( entry . getValue ( ) ) ; } return ret ; |
public class RuntimeResourceDefinition { /** * If this definition refers to a class which extends another resource definition type , this
* method will return the definition of the topmost resource . For example , if this definition
* refers to MyPatient2 , which extends MyPatient , which in turn extends Patient , this method
* will return the resource definition for Patient .
* If the definition has no parent , returns < code > this < / code > */
public RuntimeResourceDefinition getBaseDefinition ( ) { } } | validateSealed ( ) ; if ( myBaseDefinition == null ) { myBaseDefinition = myContext . getResourceDefinition ( myBaseType ) ; } return myBaseDefinition ; |
public class TarGenerator {
    /****** Volt DB Extensions ******/
    /**
     * Writes every queued entry to the tar archive, emitting a PAX extended
     * header ("x" entry) before entries whose size is at or above paxThreshold,
     * and finally finishes either the underlying stream or the archive file.
     * On IOException, attempts to close all queued entries and the archive
     * before rethrowing the original exception.
     *
     * @param outputToStream when true, finish the stream instead of the archive file
     * @param verbose when true, print per-entry progress to stdout
     */
    public void write(boolean outputToStream, boolean verbose) throws IOException, TarMalformatException {
        if (TarFileOutputStream.debug) {
            System.out.println(RB.singleton.getString(RB.WRITE_QUEUE_REPORT, entryQueue.size()));
        }
        TarEntrySupplicant entry;
        try {
            for (int i = 0; i < entryQueue.size(); i++) {
                if (verbose) {
                    System.out.print(Integer.toString(i + 1) + " / " + entryQueue.size() + ' ');
                }
                entry = (TarEntrySupplicant) entryQueue.get(i);
                if (verbose) {
                    System.out.print(entry.getPath() + "... ");
                }
                // Large entries need a PAX extended-header record written first.
                if (entry.getDataSize() >= paxThreshold) {
                    entry.makeXentry().write();
                    if (verbose) {
                        System.out.print("x... ");
                    }
                }
                entry.write();
                archive.assertAtBlockBoundary();
                if (verbose) {
                    System.out.println();
                }
            }
            if (outputToStream) {
                archive.finishStream();
                return;
            }
            archive.finish();
        } catch (IOException ioe) {
            System.err.println(); // Exception should cause a report
            try {
                // Just release resources from any Entry's input, which may be
                // left open.
                for (int i = 0; i < entryQueue.size(); i++) {
                    ((TarEntrySupplicant) entryQueue.get(i)).close();
                }
                archive.close();
            } catch (IOException ne) {
                // Too difficult to report every single error.
                // More important that the user know about the original Exc.
            }
            throw ioe;
        }
    }
}
public class DataLabelingServiceClient { /** * Lists data items in a dataset . This API can be called after data are imported into dataset .
* Pagination is supported .
* < p > Sample code :
* < pre > < code >
* try ( DataLabelingServiceClient dataLabelingServiceClient = DataLabelingServiceClient . create ( ) ) {
* String formattedParent = DataLabelingServiceClient . formatDatasetName ( " [ PROJECT ] " , " [ DATASET ] " ) ;
* String filter = " " ;
* for ( DataItem element : dataLabelingServiceClient . listDataItems ( formattedParent , filter ) . iterateAll ( ) ) {
* / / doThingsWith ( element ) ;
* < / code > < / pre >
* @ param parent Required . Name of the dataset to list data items , format :
* projects / { project _ id } / datasets / { dataset _ id }
* @ param filter Optional . Filter is not supported at this moment .
* @ throws com . google . api . gax . rpc . ApiException if the remote call fails */
public final ListDataItemsPagedResponse listDataItems ( String parent , String filter ) { } } | DATASET_PATH_TEMPLATE . validate ( parent , "listDataItems" ) ; ListDataItemsRequest request = ListDataItemsRequest . newBuilder ( ) . setParent ( parent ) . setFilter ( filter ) . build ( ) ; return listDataItems ( request ) ; |
public class DateTimePickerBase { /** * { @ inheritDoc } */
@ Override public com . google . web . bindery . event . shared . HandlerRegistration addValidationChangedHandler ( ValidationChangedHandler handler ) { } } | return validatorMixin . addValidationChangedHandler ( handler ) ; |
public class ResettableInputStream { /** * Convenient factory method to construct a new resettable input stream for
* the given file input stream , converting any IOException into
* SdkClientException with the given error message .
* Note the creation of a { @ link ResettableInputStream } would entail
* physically opening a file . If the opened file is meant to be closed only
* ( in a finally block ) by the very same code block that created it , then it
* is necessary that the release method must not be called while the
* execution is made in other stack frames .
* In such case , as other stack frames may inadvertently or indirectly call
* the close method of the stream , the creator of the stream would need to
* explicitly disable the accidental closing via
* { @ link ResettableInputStream # disableClose ( ) } , so that the release method
* becomes the only way to truly close the opened file . */
public static ResettableInputStream newResettableInputStream ( FileInputStream fis , String errmsg ) { } } | try { return new ResettableInputStream ( fis ) ; } catch ( IOException e ) { throw new SdkClientException ( errmsg , e ) ; } |
public class AngularViewContextWrapper {
    /**
     * Writes the current JSF view state (and, when a client window exists, the
     * client window id) as partial-response "update" sections.
     * Copied from com.sun.faces.context.PartialViewContextImpl. May have to be
     * adapted to future Mojarra or JSF versions.
     */
    private void renderState(FacesContext context) throws IOException {
        // Get the view state and write it to the response..
        PartialViewContext pvc = context.getPartialViewContext();
        PartialResponseWriter writer = pvc.getPartialResponseWriter();
        String viewStateId = Util.getViewStateId(context);
        writer.startUpdate(viewStateId);
        String state = context.getApplication().getStateManager().getViewState(context);
        writer.write(state);
        writer.endUpdate();
        // Also propagate the client window id when a client window is present.
        ClientWindow window = context.getExternalContext().getClientWindow();
        if (null != window) {
            String clientWindowId = Util.getClientWindowId(context);
            writer.startUpdate(clientWindowId);
            writer.write(window.getId());
            writer.endUpdate();
        }
    }
}
public class PoolsImpl { /** * Lists all of the pools in the specified account .
* @ param poolListOptions Additional parameters for the operation
* @ throws IllegalArgumentException thrown if parameters fail the validation
* @ throws BatchErrorException thrown if the request is rejected by server
* @ throws RuntimeException all other wrapped checked exceptions if the request fails to be sent
* @ return the PagedList & lt ; CloudPool & gt ; object if successful . */
public PagedList < CloudPool > list ( final PoolListOptions poolListOptions ) { } } | ServiceResponseWithHeaders < Page < CloudPool > , PoolListHeaders > response = listSinglePageAsync ( poolListOptions ) . toBlocking ( ) . single ( ) ; return new PagedList < CloudPool > ( response . body ( ) ) { @ Override public Page < CloudPool > nextPage ( String nextPageLink ) { PoolListNextOptions poolListNextOptions = null ; if ( poolListOptions != null ) { poolListNextOptions = new PoolListNextOptions ( ) ; poolListNextOptions . withClientRequestId ( poolListOptions . clientRequestId ( ) ) ; poolListNextOptions . withReturnClientRequestId ( poolListOptions . returnClientRequestId ( ) ) ; poolListNextOptions . withOcpDate ( poolListOptions . ocpDate ( ) ) ; } return listNextSinglePageAsync ( nextPageLink , poolListNextOptions ) . toBlocking ( ) . single ( ) . body ( ) ; } } ; |
public class VaultsInner { /** * Update a key vault in the specified subscription .
* @ param resourceGroupName The name of the Resource Group to which the server belongs .
* @ param vaultName Name of the vault
* @ param parameters Parameters to patch the vault
* @ throws IllegalArgumentException thrown if parameters fail the validation
* @ return the observable to the VaultInner object */
public Observable < VaultInner > updateAsync ( String resourceGroupName , String vaultName , VaultPatchParameters parameters ) { } } | return updateWithServiceResponseAsync ( resourceGroupName , vaultName , parameters ) . map ( new Func1 < ServiceResponse < VaultInner > , VaultInner > ( ) { @ Override public VaultInner call ( ServiceResponse < VaultInner > response ) { return response . body ( ) ; } } ) ; |
public class DestinationManager { /** * Checks if this destination is a queue / service or port
* @ param type */
private boolean isQueue ( DestinationType type ) { } } | boolean isQueue = false ; if ( type == DestinationType . QUEUE || type == DestinationType . PORT ) // type = = DestinationType . SERVICE )
isQueue = true ; return isQueue ; |
public class PerformanceMeasures { /** * Adds a measure . This is a convenient method for
* { @ code addMeasure ( k . toString ( ) , i ) } . If the key does not exist , for now
* it is simply ignored without any warning .
* @ param key
* one of the available keys { @ link Key } as string .
* @ param value
* the value for the specified key */
public void addMeasure ( String key , int value ) { } } | Key k = Key . fromString ( key ) ; if ( k != null ) { measures . put ( k , value ) ; } else { // TODO for now , we just ignore the value
} |
public class Automaton { /** * Método de terminación Si se fijo externamente a finalizado , ha finalizado
* Si la duración no se fijo a - 1 y el tiempo que queda es menor a 0 , ha
* finalizado En otro caso no ha terminado . Este método puede redefinirse en
* clases hijas ( por ejemplo para comparar si se ha llegado a un destino ) .
* Se recomienda llamar al finish padre en el método redefinido . Si este
* método devuelve true , en el control del autómata hay que fijar a true la
* finalización ( ver método nextState )
* @ param phatInterface
* @ return */
public boolean isFinished ( PHATInterface phatInterface ) { } } | if ( finishCondition != null && finishCondition . evaluate ( agent ) ) { return true ; } return false ; |
public class DbUtils {
    /**
     * Loads and registers a database driver class.
     * If this succeeds, it returns true, else it returns false.
     *
     * @param driverClassName of driver to load
     * @return boolean <code>true</code> if the driver was found, otherwise <code>false</code>
     */
    public static boolean loadDriver(String driverClassName) {
        try {
            Class.forName(driverClassName).newInstance();
            return true;
        } catch (IllegalAccessException e) {
            // A private constructor is fine for the DriverManager contract:
            // the class was loaded, which is all registration requires.
            return true;
        } catch (Throwable e) {
            // ClassNotFoundException, InstantiationException or anything else:
            // the driver is not available.
            return false;
        }
    }
}
public class Message { /** * setzen des feldes " nachrichtengroesse " im nachrichtenkopf einer nachricht */
private void setMsgSizeValue ( int value , boolean allowOverwrite ) { } } | String absPath = getPath ( ) + ".MsgHead.msgsize" ; SyntaxElement msgsizeElem = getElement ( absPath ) ; if ( msgsizeElem == null ) throw new NoSuchPathException ( absPath ) ; int size = ( ( DE ) msgsizeElem ) . getMinSize ( ) ; char [ ] zeros = new char [ size ] ; Arrays . fill ( zeros , '0' ) ; DecimalFormat df = new DecimalFormat ( String . valueOf ( zeros ) ) ; if ( ! propagateValue ( absPath , df . format ( value ) , DONT_TRY_TO_CREATE , allowOverwrite ) ) throw new NoSuchPathException ( absPath ) ; |
public class BeanUtil { /** * Returns the type of < code > property < / code > in given < code > beanClass < / code >
* @ param beanClass bean class
* @ param property name of the property
* @ return null if the property is not found . otherwise returns the type of the < code > property < / code > */
public static Class getPropertyType ( Class < ? > beanClass , String property ) { } } | Method getter = getGetterMethod ( beanClass , property ) ; if ( getter == null ) return null ; else return getter . getReturnType ( ) ; |
public class OptionalDouble { /** * Invokes the given mapping function on inner value if present .
* @ param mapper mapping function
* @ return an { @ code OptionalInt } with transformed value if present ,
* otherwise an empty { @ code OptionalInt }
* @ throws NullPointerException if value is present and
* { @ code mapper } is { @ code null } */
@ NotNull public OptionalInt mapToInt ( @ NotNull DoubleToIntFunction mapper ) { } } | if ( ! isPresent ( ) ) { return OptionalInt . empty ( ) ; } Objects . requireNonNull ( mapper ) ; return OptionalInt . of ( mapper . applyAsInt ( value ) ) ; |
public class MinioClient {
    /**
     * Calculates multipart size of given size and returns a three element array
     * containing part size, part count and last part size.
     *
     * @throws InvalidArgumentException when size exceeds the allowed 5TiB object limit
     */
    private static int[] calculateMultipartSize(long size) throws InvalidArgumentException {
        if (size > MAX_OBJECT_SIZE) {
            throw new InvalidArgumentException("size " + size + " is greater than allowed size 5TiB");
        }
        // Smallest part size that keeps the part count within MAX_MULTIPART_COUNT,
        // rounded up to a multiple of MIN_MULTIPART_SIZE.
        double partSize = Math.ceil((double) size / MAX_MULTIPART_COUNT);
        partSize = Math.ceil(partSize / MIN_MULTIPART_SIZE) * MIN_MULTIPART_SIZE;
        double partCount = Math.ceil(size / partSize);
        // The final part carries the remainder; a zero remainder means a full-sized last part.
        double lastPartSize = partSize - (partSize * partCount - size);
        if (lastPartSize == 0.0) {
            lastPartSize = partSize;
        }
        return new int[] { (int) partSize, (int) partCount, (int) lastPartSize };
    }
}
public class BaseTool {
    /**
     * Run the tool's lifecycle given the input arguments: configure logging,
     * parse the command line, validate the options, then execute the tool.
     *
     * @param args the cli arg vector
     * @throws CLIToolException if an error occurs
     */
    public void run(final String[] args) throws CLIToolException {
        // Logging must be configured first so later steps can report problems.
        PropertyConfigurator.configure(Constants.getLog4jPropertiesFile().getAbsolutePath());
        CommandLine cli = parseArgs(args);
        validateOptions(cli, args);
        go();
    }
}
public class JDBC4ResultSet { /** * language . */
@ Override public URL getURL ( int columnIndex ) throws SQLException { } } | checkColumnBounds ( columnIndex ) ; try { return new URL ( table . getString ( columnIndex - 1 ) ) ; } catch ( Exception x ) { throw SQLError . get ( x ) ; } |
public class ResourceUtil { /** * return the size of the Resource , other than method length of Resource this method return the size
* of all files in a directory
* @ param collectionDir
* @ return */
public static long getRealSize ( Resource res , ResourceFilter filter ) { } } | if ( res . isFile ( ) ) { return res . length ( ) ; } else if ( res . isDirectory ( ) ) { long size = 0 ; Resource [ ] children = filter == null ? res . listResources ( ) : res . listResources ( filter ) ; for ( int i = 0 ; i < children . length ; i ++ ) { size += getRealSize ( children [ i ] ) ; } return size ; } return 0 ; |
public class VirtualNetworkGatewaysInner {
    /**
     * Generates VPN profile for P2S client of the virtual network gateway in the
     * specified resource group. Used for IKEV2 and radius based authentication.
     *
     * @param resourceGroupName The name of the resource group.
     * @param virtualNetworkGatewayName The name of the virtual network gateway.
     * @param parameters Parameters supplied to the generate virtual network gateway VPN client package operation.
     * @throws IllegalArgumentException thrown if parameters fail the validation
     * @throws CloudException thrown if the request is rejected by server
     * @throws RuntimeException all other wrapped checked exceptions if the request fails to be sent
     * @return the String object if successful.
     */
    public String generateVpnProfile(String resourceGroupName, String virtualNetworkGatewayName, VpnClientParameters parameters) {
        // Block until the long-running operation completes and return its final body.
        return generateVpnProfileWithServiceResponseAsync(resourceGroupName, virtualNetworkGatewayName, parameters).toBlocking().last().body();
    }
}
public class RouteDispatcher {
    /**
     * Dispatches the Request/Response through the pre-dispatch, route-dispatch
     * and post-dispatch phases, in that order.
     *
     * @param request the incoming request
     * @param response the outgoing response
     * @throws IOException on I/O failure
     * @throws ServletException on servlet-level failure
     */
    public void dispatch(Request request, Response response) throws IOException, ServletException {
        onPreDispatch(request, response);
        onRouteDispatch(request, response);
        onPostDispatch(request, response);
    }
}
public class JSONConverter { /** * Decode a JSON document to retrieve an MBeanQuery instance .
* @ param in The stream to read JSON from
* @ return The decoded MBeanQuery instance
* @ throws ConversionException If JSON uses unexpected structure / format
* @ throws IOException If an I / O error occurs or if JSON is ill - formed .
* @ throws ClassNotFoundException If needed class can ' t be found .
* @ see # writeMBeanQuery ( OutputStream , MBeanQuery ) */
public MBeanQuery readMBeanQuery ( InputStream in ) throws ConversionException , IOException , ClassNotFoundException { } } | JSONObject json = parseObject ( in ) ; MBeanQuery ret = new MBeanQuery ( ) ; ret . objectName = readObjectName ( json . get ( N_OBJECTNAME ) ) ; Object queryExp = readSerialized ( json . get ( N_QUERYEXP ) ) ; if ( queryExp != null && ! ( queryExp instanceof QueryExp ) ) { throwConversionException ( "readMBeanQuery() receives an instance that's not a QueryExp." , json . get ( N_QUERYEXP ) ) ; } ret . queryExp = ( QueryExp ) queryExp ; ret . className = readStringInternal ( json . get ( N_CLASSNAME ) ) ; return ret ; |
public class LayoutFactory {
    /**
     * Read a layout as written by {@link Layout#writeTo}. The layout and its
     * properties are inserted within a single transaction; unique-constraint
     * conflicts are resolved by reusing an equal existing record or, for the
     * layout record itself, by bumping the generation and retrying the insert.
     *
     * @since 1.2.2
     */
    public Layout readLayoutFrom(InputStream in) throws IOException, RepositoryException {
        Transaction txn = mRepository.enterTransaction();
        try {
            txn.setForUpdate(true);
            StoredLayout storedLayout = mLayoutStorage.prepare();
            storedLayout.readFrom(in);
            try {
                storedLayout.insert();
            } catch (UniqueConstraintException e) {
                StoredLayout existing = mLayoutStorage.prepare();
                storedLayout.copyPrimaryKeyProperties(existing);
                if (existing.tryLoad()) {
                    // Only check subset of primary and alternate keys. The check
                    // of layout properties is more important.
                    if (!existing.getStorableTypeName().equals(storedLayout.getStorableTypeName())) {
                        throw e;
                    }
                    storedLayout = existing;
                } else {
                    // Assume alternate key constraint, so increment the generation.
                    storedLayout.setGeneration(nextGeneration(mRepository, storedLayout.getStorableTypeName()));
                    storedLayout.insert();
                }
            }
            int op;
            // A zero byte terminates the stream of property records.
            while ((op = in.read()) != 0) {
                StoredLayoutProperty storedProperty = mPropertyStorage.prepare();
                storedProperty.readFrom(in);
                try {
                    storedProperty.insert();
                } catch (UniqueConstraintException e) {
                    // An equal property may already exist; any other conflict is fatal.
                    StoredLayoutProperty existing = mPropertyStorage.prepare();
                    storedProperty.copyPrimaryKeyProperties(existing);
                    if (!existing.tryLoad()) {
                        throw e;
                    }
                    storedProperty.copyVersionProperty(existing);
                    if (!existing.equalProperties(storedProperty)) {
                        throw e;
                    }
                }
            }
            txn.commit();
            return new Layout(this, storedLayout);
        } finally {
            txn.exit();
        }
    }
}
public class CassandraArchiveRepository {
    /**
     * Unsupported: this repository does not accept deployment specs.
     *
     * @throws UnsupportedOperationException always
     */
    @Override
    public void insertArchive(JarScriptArchive jarScriptArchive, Map<String, Object> initialDeploySpecs) throws IOException {
        throw new UnsupportedOperationException("This repository does not support deployment specs.");
    }
}
public class HMM { /** * Tag ( Latent Var ) is Xi , words ( Observed Var ) yi ( or oi ) . . .
* @ param data input sequence examples .
* @ return HMM model . */
public static HMMModel train ( List < SequenceTuple > data ) { } } | HMMModel model = new HMMModel ( ) ; for ( SequenceTuple seqTuple : data ) { Pair < List < String > , List < String > > wordTagPair = getXSeqOSeq ( seqTuple ) ; List < String > words = wordTagPair . getLeft ( ) ; List < String > tag = wordTagPair . getRight ( ) ; for ( int i = 0 ; i < words . size ( ) ; i ++ ) { Double ct = model . emission . get ( words . get ( i ) , tag . get ( i ) ) ; // Oi , Xi
ct = ct == null ? 1 : ct + 1 ; model . emission . put ( words . get ( i ) , tag . get ( i ) , ct ) ; } for ( int i = 0 ; i < tag . size ( ) - 1 ; i ++ ) { // Xi - > X _ i + 1
Double ct = model . transition . get ( tag . get ( i ) , tag . get ( i + 1 ) ) ; ct = ct == null ? 1 : ct + 1 ; model . transition . put ( tag . get ( i ) , tag . get ( i + 1 ) , ct ) ; } } normalizeEmission ( model ) ; normalizeTrans ( model ) ; return model ; |
public class AmazonEC2Waiters {
    /**
     * Builds a ConversionTaskCompleted waiter by using custom parameters
     * waiterParameters and other parameters defined in the waiters
     * specification, and then polls until it determines whether the resource
     * entered the desired state or not, where polling criteria is bound by
     * either default polling strategy or custom polling strategy.
     * The default strategy here retries up to 40 times with a fixed 15s delay.
     */
    public Waiter<DescribeConversionTasksRequest> conversionTaskCompleted() {
        return new WaiterBuilder<DescribeConversionTasksRequest, DescribeConversionTasksResult>()
                .withSdkFunction(new DescribeConversionTasksFunction(client))
                // Matchers decide when polling stops: completed, cancelled or cancelling.
                .withAcceptors(new ConversionTaskCompleted.IsCompletedMatcher(),
                        new ConversionTaskCompleted.IsCancelledMatcher(),
                        new ConversionTaskCompleted.IsCancellingMatcher())
                .withDefaultPollingStrategy(new PollingStrategy(new MaxAttemptsRetryStrategy(40), new FixedDelayStrategy(15)))
                .withExecutorService(executorService)
                .build();
    }
}
public class MessageProcessInfoField {
    /**
     * Set up the default screen control for this field, as a table lookup over
     * this field's reference record using the DESCRIPTION field.
     *
     * @param itsLocation Location of this component on screen (ie., GridBagConstraint).
     * @param targetScreen Where to place this component (ie., Parent screen or GridBagLayout).
     * @param converter The converter to set the screenfield to (not used by this implementation).
     * @param iDisplayFieldDesc Display the label? (optional).
     * @param properties Extra properties (not used by this implementation).
     * @return Return the component or ScreenField that is created for this field.
     */
    public ScreenComponent setupDefaultView(ScreenLoc itsLocation, ComponentParent targetScreen, Convert converter, int iDisplayFieldDesc, Map<String, Object> properties) {
        return this.setupTableLookup(itsLocation, targetScreen, iDisplayFieldDesc, this.makeReferenceRecord(), null, MessageProcessInfo.DESCRIPTION, true);
    }
}
public class ModClusterContainer { /** * Register a web context . If the web context already exists , just enable it .
* @ param contextPath the context path
* @ param jvmRoute the jvmRoute
* @ param aliases the virtual host aliases */
public synchronized boolean enableContext ( final String contextPath , final String jvmRoute , final List < String > aliases ) { } } | final Node node = nodes . get ( jvmRoute ) ; if ( node != null ) { Context context = node . getContext ( contextPath , aliases ) ; if ( context == null ) { context = node . registerContext ( contextPath , aliases ) ; UndertowLogger . ROOT_LOGGER . registeringContext ( contextPath , jvmRoute ) ; UndertowLogger . ROOT_LOGGER . registeringContext ( contextPath , jvmRoute , aliases ) ; for ( final String alias : aliases ) { VirtualHost virtualHost = hosts . get ( alias ) ; if ( virtualHost == null ) { virtualHost = new VirtualHost ( ) ; hosts . put ( alias , virtualHost ) ; } virtualHost . registerContext ( contextPath , jvmRoute , context ) ; } } context . enable ( ) ; return true ; } return false ; |
public class PlayServerHttpExchange {
    /**
     * Reads the whole raw request body on a dedicated thread, hands it to the
     * given chunk action as a single ByteBuffer, then fires the end actions.
     * TODO https://github.com/vibe-project/vibe-java-platform/issues/4
     */
    @Override
    protected void doRead(final Action<ByteBuffer> chunkAction) {
        // Using one of Play's thread pools may be a better way to go?
        // https://www.playframework.com/documentation/2.3.x/ThreadPools
        // TODO https://github.com/vibe-project/vibe-java-platform/issues/6
        new Thread(new Runnable() {
            @Override
            public void run() {
                chunkAction.on(ByteBuffer.wrap(request.body().asRaw().asBytes()));
                endActions.fire();
            }
        }).start();
    }
}
public class HTMLs { /** * 将 HTML 文本适配到指定长度 。
* @ param html
* HTML 文本
* @ param length
* 目标长度
* @ return 适配结果 */
public static String fitToLength ( final String html , final int length ) { } } | if ( html == null ) { return StringUtils . repeat ( SPACE , length ) ; } int len = html . length ( ) ; if ( len >= length ) { return html ; } StringBuilder sb = new StringBuilder ( ) ; sb . append ( html ) ; for ( int i = 0 ; i < length - len ; i ++ ) { sb . append ( SPACE ) ; } return sb . toString ( ) ; |
public class FieldContainer { /** * Returns a string whose length and character type reflect the passed FieldCase using the
* embedded field object . If FieldCase . NULL or FieldCase . BLANK is passed , the method returns
* null .
* @ return the value just generated */
@ Override protected String initValuesImpl ( final FieldCase ca ) { } } | if ( ca == FieldCase . NULL || ca == FieldCase . BLANK ) { return null ; } return field . getString ( control . getNext ( ca ) ) ; |
public class PhreakBranchNode { /** * A branch has two potential sinks . rtnSink is for the sink if the contained logic returns true .
* mainSink is for propagations after the branch node , if they are allowed .
* it may have one or the other or both . there is no state that indicates whether one or the other or both
* are present , so all tuple children must be inspected and references coalesced from that .
* when handling updates and deletes it must search the child tuples to colasce the references .
* This is done by checking the tuple sink with the known main or rtn sink . */
private BranchTuples getBranchTuples ( LeftTupleSink sink , LeftTuple leftTuple ) { } } | BranchTuples branchTuples = new BranchTuples ( ) ; LeftTuple child = leftTuple . getFirstChild ( ) ; if ( child != null ) { // assigns the correct main or rtn LeftTuple based on the identified sink
if ( child . getTupleSink ( ) == sink ) { branchTuples . mainLeftTuple = child ; } else { branchTuples . rtnLeftTuple = child ; } child = child . getHandleNext ( ) ; if ( child != null ) { if ( child . getTupleSink ( ) == sink ) { branchTuples . mainLeftTuple = child ; } else { branchTuples . rtnLeftTuple = child ; } } } return branchTuples ; |
public class WebDriverBrowserBuilder {
    /**
     * Build a new WebDriver based EmbeddedBrowser: reads the crawl configuration,
     * instantiates the browser matching the configured type, applies the configured
     * pixel density (for Retina displays) when set, then runs the browser-created
     * plugins before returning.
     *
     * @return the new build WebDriver based embeddedBrowser
     */
    @Override
    public EmbeddedBrowser get() {
        LOGGER.debug("Setting up a Browser");
        // Retrieve the config values used
        ImmutableSortedSet<String> filterAttributes = configuration.getCrawlRules().getPreCrawlConfig().getFilterAttributeNames();
        long crawlWaitReload = configuration.getCrawlRules().getWaitAfterReloadUrl();
        long crawlWaitEvent = configuration.getCrawlRules().getWaitAfterEvent();
        // Determine the requested browser type
        EmbeddedBrowser browser = null;
        EmbeddedBrowser.BrowserType browserType = configuration.getBrowserConfig().getBrowserType();
        try {
            switch (browserType) {
            case CHROME:
                browser = newChromeBrowser(filterAttributes, crawlWaitReload, crawlWaitEvent, false);
                break;
            case CHROME_HEADLESS:
                browser = newChromeBrowser(filterAttributes, crawlWaitReload, crawlWaitEvent, true);
                break;
            case FIREFOX:
                browser = newFirefoxBrowser(filterAttributes, crawlWaitReload, crawlWaitEvent, false);
                break;
            case FIREFOX_HEADLESS:
                browser = newFirefoxBrowser(filterAttributes, crawlWaitReload, crawlWaitEvent, true);
                break;
            case REMOTE:
                browser = WebDriverBackedEmbeddedBrowser.withRemoteDriver(configuration.getBrowserConfig().getRemoteHubUrl(), filterAttributes, crawlWaitEvent, crawlWaitReload);
                break;
            case PHANTOMJS:
                browser = newPhantomJSDriver(filterAttributes, crawlWaitReload, crawlWaitEvent);
                break;
            default:
                throw new IllegalStateException("Unrecognized browser type " + configuration.getBrowserConfig().getBrowserType());
            }
        } catch (IllegalStateException e) {
            // Log the failure with the browser type before propagating it.
            LOGGER.error("Crawling with {} failed: " + e.getMessage(), browserType.toString());
            throw e;
        }
        /* for Retina display. */
        if (browser instanceof WebDriverBackedEmbeddedBrowser) {
            int pixelDensity = this.configuration.getBrowserConfig().getBrowserOptions().getPixelDensity();
            // -1 means "not configured"; only apply an explicitly set density.
            if (pixelDensity != -1)
                ((WebDriverBackedEmbeddedBrowser) browser).setPixelDensity(pixelDensity);
        }
        plugins.runOnBrowserCreatedPlugins(browser);
        return browser;
    }
}
public class RemotePidIterator { /** * Does the stash have anything in it ? If it ' s empty , try to fill it . */
private void refreshStash ( ) throws ObjectSourceException { } } | if ( ! stash . isEmpty ( ) ) { return ; } try { if ( SEARCH_NOT_STARTED . equals ( token ) ) { beginSearch ( ) ; } else if ( token != null ) { resumeSearch ( ) ; } } catch ( RemoteException e ) { throw new ObjectSourceException ( e ) ; } |
public class TmdbSearch { /** * Search Companies .
* You can use this method to search for production companies that are part of TMDb . The company IDs will map to those returned
* on movie calls .
* http : / / help . themoviedb . org / kb / api / search - companies
* @ param query
* @ param page
* @ return
* @ throws MovieDbException */
public ResultList < Company > searchCompanies ( String query , Integer page ) throws MovieDbException { } } | TmdbParameters parameters = new TmdbParameters ( ) ; parameters . add ( Param . QUERY , query ) ; parameters . add ( Param . PAGE , page ) ; URL url = new ApiUrl ( apiKey , MethodBase . SEARCH ) . subMethod ( MethodSub . COMPANY ) . buildUrl ( parameters ) ; WrapperGenericList < Company > wrapper = processWrapper ( getTypeReference ( Company . class ) , url , "company" ) ; return wrapper . getResultsList ( ) ; |
public class MapFormat {
    /**
     * Parses the string, replacing each occurrence of a formatted map value with
     * its delimited key (ldel + key + rdel). Does not yet handle recursion
     * (where the substituted strings contain {n} references.)
     *
     * @return New format.
     */
    public String parse(String source) {
        StringBuffer sbuf = new StringBuffer(source);
        Iterator key_it = argmap.keySet().iterator();
        // skipped = new RangeList ( ) ;
        // What was this for??
        // process ( source , "\"" , "\"" ) ; // NOI18N
        while (key_it.hasNext()) {
            String it_key = (String) key_it.next();
            String it_obj = formatObject(argmap.get(it_key));
            int it_idx = -1;
            do {
                // Scan forward from just past the last match and replace in place.
                it_idx = sbuf.toString().indexOf(it_obj, ++it_idx);
                if (it_idx >= 0 /* && !skipped.containsOffset(it_idx) */) {
                    sbuf.replace(it_idx, it_idx + it_obj.length(), ldel + it_key + rdel);
                    // skipped = new RangeList ( ) ;
                    // What was this for??
                    // process ( sbuf.toString() , "\"" , "\"" ) ; // NOI18N
                }
            } while (it_idx != -1);
        }
        return sbuf.toString();
    }
}
public class StaplerResponseWrapper {
    /**
     * {@inheritDoc}
     * Delegates directly to the wrapped response.
     */
    @Override
    public void addHeader(String name, String value) {
        getWrapped().addHeader(name, value);
    }
}
public class TileSetColorImpl {
    /**
     * Sets the CVAL4 attribute and, when notification is required, emits an EMF
     * SET notification carrying the old and new values.
     * <!-- begin-user-doc -->
     * <!-- end-user-doc -->
     * @generated
     */
    public void setCVAL4(Integer newCVAL4) {
        Integer oldCVAL4 = cval4;
        cval4 = newCVAL4;
        if (eNotificationRequired())
            eNotify(new ENotificationImpl(this, Notification.SET, AfplibPackage.TILE_SET_COLOR__CVAL4, oldCVAL4, cval4));
    }
}
public class WebpIO { /** * delete temp dir and commands */
public void close ( ) { } } | File tmp = new File ( webpTmpDir ) ; if ( tmp . exists ( ) && tmp . isDirectory ( ) ) { File [ ] files = tmp . listFiles ( ) ; for ( File file : Objects . requireNonNull ( files ) ) { file . delete ( ) ; } tmp . delete ( ) ; } |
public class LinearClassifierFactory { /** * Trains the linear classifier using Generalized Expectation criteria as described in
* < tt > Generalized Expectation Criteria for Semi Supervised Learning of Conditional Random Fields < / tt > , Mann and McCallum , ACL 2008.
* The original algorithm is proposed for CRFs but has been adopted to LinearClassifier ( which is a simpler special case of a CRF ) .
* IMPORTANT : the labeled features that are passed as an argument are assumed to be binary valued , although
* other features are allowed to be real valued . */
public LinearClassifier < L , F > trainSemiSupGE ( GeneralDataset < L , F > labeledDataset , List < ? extends Datum < L , F > > unlabeledDataList , List < F > GEFeatures , double convexComboCoeff ) { } } | LogConditionalObjectiveFunction < L , F > objective = new LogConditionalObjectiveFunction < L , F > ( labeledDataset , new LogPrior ( LogPrior . LogPriorType . NULL ) ) ; GeneralizedExpectationObjectiveFunction < L , F > geObjective = new GeneralizedExpectationObjectiveFunction < L , F > ( labeledDataset , unlabeledDataList , GEFeatures ) ; SemiSupervisedLogConditionalObjectiveFunction semiSupObjective = new SemiSupervisedLogConditionalObjectiveFunction ( objective , geObjective , null , convexComboCoeff ) ; double [ ] initial = objective . initial ( ) ; double [ ] weights = minimizer . minimize ( semiSupObjective , TOL , initial ) ; return new LinearClassifier < L , F > ( objective . to2D ( weights ) , labeledDataset . featureIndex ( ) , labeledDataset . labelIndex ( ) ) ; |
public class AWSServiceCatalogClient { /** * Lists the provisioned products that are available ( not terminated ) .
* To use additional filtering , see < a > SearchProvisionedProducts < / a > .
* @ param scanProvisionedProductsRequest
* @ return Result of the ScanProvisionedProducts operation returned by the service .
* @ throws InvalidParametersException
* One or more parameters provided to the operation are not valid .
* @ sample AWSServiceCatalog . ScanProvisionedProducts
* @ see < a href = " http : / / docs . aws . amazon . com / goto / WebAPI / servicecatalog - 2015-12-10 / ScanProvisionedProducts "
* target = " _ top " > AWS API Documentation < / a > */
@ Override public ScanProvisionedProductsResult scanProvisionedProducts ( ScanProvisionedProductsRequest request ) { } } | request = beforeClientExecution ( request ) ; return executeScanProvisionedProducts ( request ) ; |
public class CookieBasedSessionManagementHelper { /** * Modifies a message so its Request Header / Body matches the web session provided .
* @ param message the message
* @ param session the session */
public static void processMessageToMatchSession ( HttpMessage message , HttpSession session ) { } } | processMessageToMatchSession ( message , message . getRequestHeader ( ) . getHttpCookies ( ) , session ) ; |
public class Entry { /** * Returns an observable map of properties on this entry for use primarily
* by application developers .
* @ return an observable map of properties on this entry for use primarily
* by application developers */
public final ObservableMap < Object , Object > getProperties ( ) { } } | if ( properties == null ) { properties = FXCollections . observableMap ( new HashMap < > ( ) ) ; MapChangeListener < ? super Object , ? super Object > changeListener = change -> { if ( change . getKey ( ) . equals ( "com.calendarfx.recurrence.source" ) ) { // $ NON - NLS - 1 $
if ( change . getValueAdded ( ) != null ) { @ SuppressWarnings ( "unchecked" ) Entry < T > source = ( Entry < T > ) change . getValueAdded ( ) ; // lookup of property first to instantiate
recurrenceSourceProperty ( ) ; recurrenceSource . set ( source ) ; } } else if ( change . getKey ( ) . equals ( "com.calendarfx.recurrence.id" ) ) { // $ NON - NLS - 1 $
if ( change . getValueAdded ( ) != null ) { setRecurrenceId ( ( String ) change . getValueAdded ( ) ) ; } } } ; properties . addListener ( changeListener ) ; } return properties ; |
public class BaasACL { /** * Checks if the role has the specified { @ code grant }
* @ param grant a { @ link com . baasbox . android . Grant }
* @ param role a role
* @ return */
public boolean hasRoleGrant ( Grant grant , String role ) { } } | if ( grant == null ) throw new IllegalArgumentException ( "grant cannot be null" ) ; if ( role == null ) throw new IllegalArgumentException ( "role cannot be null" ) ; Set < String > roles = rolesGrants . get ( grant ) ; return roles != null && roles . contains ( role ) ; |
public class CommonOps_DDF3 { /** * Returns the absolute value of the element in the vector that has the largest absolute value . < br >
* < br >
* Max { | a < sub > i < / sub > | } for all i < br >
* @ param a A matrix . Not modified .
* @ return The max abs element value of the vector . */
public static double elementMaxAbs ( DMatrix3 a ) { } } | double max = Math . abs ( a . a1 ) ; double tmp = Math . abs ( a . a2 ) ; if ( tmp > max ) max = tmp ; tmp = Math . abs ( a . a2 ) ; if ( tmp > max ) max = tmp ; tmp = Math . abs ( a . a3 ) ; if ( tmp > max ) max = tmp ; return max ; |
public class OfferService { /** * Checks if an incremental block report should be sent .
* @ param startTime
* @ return true if the report should be sent */
private boolean shouldSendIncrementalReport ( long startTime ) { } } | boolean isPrimary = isPrimaryServiceCached ( ) || donotDelayIncrementalBlockReports ; boolean deleteIntervalTrigger = ( startTime - lastDeletedReport > anode . deletedReportInterval ) ; // by default the report should be sent if there are any received
// acks , or the deleteInterval has passed
boolean sendReportDefault = pendingReceivedRequests > 0 || deleteIntervalTrigger ; if ( isPrimary ) { // if talking to primary , send the report with the default
// conditions
return sendReportDefault ; } else { // if talking to standby . send the report ONLY when the
// retry interval has passed in addition to the default
// condidtions
boolean sendIfStandby = ( lastBlockReceivedFailed + blockReceivedRetryInterval < startTime ) && sendReportDefault ; return sendIfStandby ; } |
public class DefaultSelendroidDriver { /** * ( non - Javadoc )
* @ see org . openqa . selenium . android . server . AndroidDriver # stopSession ( ) */
@ Override public void stopSession ( ) { } } | serverInstrumentation . finishAllActivities ( ) ; this . activeWindowType = WindowType . NATIVE_APP . name ( ) ; this . session = null ; nativeSearchScope = null ; selendroidNativeDriver = null ; selendroidWebDriver = null ; webviewSearchScope = null ; |
public class Configuration { /** * Load a class by name .
* @ param name the class name .
* @ return the class object .
* @ throws ClassNotFoundException if the class is not found . */
public Class < ? > getClassByName ( String name ) throws ClassNotFoundException { } } | Map < String , Class < ? > > map = CACHE_CLASSES . get ( classLoader ) ; if ( map == null ) { Map < String , Class < ? > > newMap = new ConcurrentHashMap < > ( ) ; map = CACHE_CLASSES . putIfAbsent ( classLoader , newMap ) ; if ( map == null ) { map = newMap ; } } Class clazz = map . get ( name ) ; if ( clazz == null ) { clazz = Class . forName ( name , true , classLoader ) ; if ( clazz != null ) { map . put ( name , clazz ) ; } } return clazz ; |
public class AsyncStorageWrapper { /** * Executes the given Callable synchronously and invokes cleanup when done .
* @ param operation The Callable to execute . */
private < R > CompletableFuture < R > execute ( Callable < R > operation ) { } } | return CompletableFuture . supplyAsync ( ( ) -> { try { return operation . call ( ) ; } catch ( Exception ex ) { throw new CompletionException ( ex ) ; } } , this . executor ) ; |
public class GolangGenerator { /** * between the message ' s blockLength and our ( older ) schema ' s blockLength */
private void generateExtensionCheck ( final StringBuilder sb , final char varName ) { } } | this . imports . add ( "io" ) ; this . imports . add ( "io/ioutil" ) ; sb . append ( String . format ( "\tif actingVersion > %1$s.SbeSchemaVersion() && blockLength > %1$s.SbeBlockLength() {\n" + "\t\tio.CopyN(ioutil.Discard, _r, int64(blockLength-%1$s.SbeBlockLength()))\n" + "\t}\n" , varName ) ) ; |
public class NamePatternUtil { /** * Accepts an Ant - ish or regular expression pattern and compiles to a regular expression .
* This is similar to SelectorUtils in the Maven codebase , but there the code uses the
* platform File . separator , while here we always want to work with forward slashes .
* Also , for a more natural fit with repository tags , both * and * * should stop at the colon
* that precedes the tag .
* Like SelectorUtils , wrapping a pattern in % regex [ pattern ] will create a regex from the
* pattern provided without translation . Otherwise , or if wrapped in % ant [ pattern ] ,
* then a regular expression will be created that is anchored at beginning and end ,
* converts ? to [ ^ / : ] , * to ( [ ^ / : ] | : ( ? = . * : ) ) and * * to ( [ ^ : ] | : ( ? = . * : ) ) * .
* If * * is followed by / , the / is converted to a negative lookbehind for anything
* apart from a slash .
* @ return a regular expression pattern created from the input pattern */
public static String convertImageNamePattern ( String pattern ) { } } | final String REGEX_PREFIX = "%regex[" , ANT_PREFIX = "%ant[" , PATTERN_SUFFIX = "]" ; if ( pattern . startsWith ( REGEX_PREFIX ) && pattern . endsWith ( PATTERN_SUFFIX ) ) { return pattern . substring ( REGEX_PREFIX . length ( ) , pattern . length ( ) - PATTERN_SUFFIX . length ( ) ) ; } if ( pattern . startsWith ( ANT_PREFIX ) && pattern . endsWith ( PATTERN_SUFFIX ) ) { pattern = pattern . substring ( ANT_PREFIX . length ( ) , pattern . length ( ) - PATTERN_SUFFIX . length ( ) ) ; } String [ ] parts = pattern . split ( "((?=[/:?*])|(?<=[/:?*]))" ) ; Matcher matcher = Pattern . compile ( "[A-Za-z0-9-]+" ) . matcher ( "" ) ; StringBuilder builder = new StringBuilder ( "^" ) ; for ( int i = 0 ; i < parts . length ; ++ i ) { if ( "?" . equals ( parts [ i ] ) ) { builder . append ( "[^/:]" ) ; } else if ( "*" . equals ( parts [ i ] ) ) { if ( i + 1 < parts . length && "*" . equals ( parts [ i + 1 ] ) ) { builder . append ( "([^:]|:(?=.*:))*" ) ; ++ i ; if ( i + 1 < parts . length && "/" . equals ( parts [ i + 1 ] ) ) { builder . append ( "(?<![^/])" ) ; ++ i ; } } else { builder . append ( "([^/:]|:(?=.*:))*" ) ; } } else if ( "/" . equals ( parts [ i ] ) || ":" . equals ( parts [ i ] ) || matcher . reset ( parts [ i ] ) . matches ( ) ) { builder . append ( parts [ i ] ) ; } else if ( parts [ i ] . length ( ) > 0 ) { builder . append ( Pattern . quote ( parts [ i ] ) ) ; } } builder . append ( "$" ) ; return builder . toString ( ) ; |
public class KeyVaultClientBaseImpl { /** * List storage SAS definitions for the given storage account . This operation requires the storage / listsas permission .
* @ param vaultBaseUrl The vault name , for example https : / / myvault . vault . azure . net .
* @ param storageAccountName The name of the storage account .
* @ param maxresults Maximum number of results to return in a page . If not specified the service will return up to 25 results .
* @ throws IllegalArgumentException thrown if parameters fail the validation
* @ return the observable to the PagedList & lt ; SasDefinitionItem & gt ; object */
public Observable < ServiceResponse < Page < SasDefinitionItem > > > getSasDefinitionsWithServiceResponseAsync ( final String vaultBaseUrl , final String storageAccountName , final Integer maxresults ) { } } | return getSasDefinitionsSinglePageAsync ( vaultBaseUrl , storageAccountName , maxresults ) . concatMap ( new Func1 < ServiceResponse < Page < SasDefinitionItem > > , Observable < ServiceResponse < Page < SasDefinitionItem > > > > ( ) { @ Override public Observable < ServiceResponse < Page < SasDefinitionItem > > > call ( ServiceResponse < Page < SasDefinitionItem > > page ) { String nextPageLink = page . body ( ) . nextPageLink ( ) ; if ( nextPageLink == null ) { return Observable . just ( page ) ; } return Observable . just ( page ) . concatWith ( getSasDefinitionsNextWithServiceResponseAsync ( nextPageLink ) ) ; } } ) ; |
public class Sql { /** * Performs the given SQL query calling the given < code > closure < / code > with each row of the result set starting at
* the provided < code > offset < / code > , and including up to < code > maxRows < / code > number of rows .
* The row will be a < code > GroovyResultSet < / code > which is a < code > ResultSet < / code >
* that supports accessing the fields using property style notation and ordinal index values .
* The query may contain GString expressions .
* Note that the underlying implementation is based on either invoking < code > ResultSet . absolute ( ) < / code > ,
* or if the ResultSet type is < code > ResultSet . TYPE _ FORWARD _ ONLY < / code > , the < code > ResultSet . next ( ) < / code > method
* is invoked equivalently . The first row of a ResultSet is 1 , so passing in an offset of 1 or less has no effect
* on the initial positioning within the result set .
* Note that different database and JDBC driver implementations may work differently with respect to this method .
* Specifically , one should expect that < code > ResultSet . TYPE _ FORWARD _ ONLY < / code > may be less efficient than a
* " scrollable " type .
* @ param gstring a GString containing the SQL query with embedded params
* @ param offset the 1 - based offset for the first row to be processed
* @ param maxRows the maximum number of rows to be processed
* @ param closure called for each row with a GroovyResultSet
* @ throws SQLException if a database access error occurs */
public void eachRow ( GString gstring , int offset , int maxRows , @ ClosureParams ( value = SimpleType . class , options = "groovy.sql.GroovyResultSet" ) Closure closure ) throws SQLException { } } | List < Object > params = getParameters ( gstring ) ; String sql = asSql ( gstring , params ) ; eachRow ( sql , params , offset , maxRows , closure ) ; |
public class ParseTree { /** * Sets the parent location . Note the parent location must cover a superset of the
* specified location ' s area .
* @ param l The parent location . */
public void setParent ( IParseTree l ) { } } | if ( l != null && ! l . contains ( this ) && getLength ( ) > 0 ) { throw new IllegalArgumentException ( "Attempted set the parent location, but the parent location's area is not a superset of this location's area." ) ; } if ( _pe != null ) { ParsedElement parentElement = ( ParsedElement ) _pe . getParent ( ) ; if ( parentElement != null ) { ParseTree oldParent = parentElement . getLocation ( ) ; if ( oldParent != null ) { oldParent . _children . remove ( this ) ; } } _pe . setParent ( l == null ? null : ( ( ParseTree ) l ) . _pe ) ; } |
public class TaskBase { /** * Wrapper method that initially obtains lock then executes the passed method , and finally releases lock .
* @ param resource resource to be updated by the task .
* @ param parameters method parameters .
* @ param operation lambda operation that is the actual task .
* @ param < T > type parameter of return value of operation to be executed .
* @ return return value of task execution . */
public < T > CompletableFuture < T > execute ( final Resource resource , final Serializable [ ] parameters , final FutureOperation < T > operation ) { } } | if ( ! ready ) { return Futures . failedFuture ( new IllegalStateException ( getClass ( ) . getName ( ) + " not yet ready" ) ) ; } final String tag = UUID . randomUUID ( ) . toString ( ) ; final TaskData taskData = getTaskData ( parameters ) ; final CompletableFuture < T > result = new CompletableFuture < > ( ) ; final TaggedResource taggedResource = new TaggedResource ( tag , resource ) ; log . debug ( "Host={}, Tag={} starting to execute task {}-{} on resource {}" , context . hostId , tag , taskData . getMethodName ( ) , taskData . getMethodVersion ( ) , resource ) ; if ( createIndexOnlyMode ) { return createIndexes ( taggedResource , taskData ) ; } // PutChild ( HostId , resource )
// Initially store the fact that I am about the update the resource .
// Since multiple threads within this process could concurrently attempt to modify same resource ,
// we tag the resource name with a random GUID so as not to interfere with other thread ' s
// creation or deletion of resource children under HostId node .
taskMetadataStore . putChild ( context . hostId , taggedResource ) // After storing that fact , lock the resource , execute task and unlock the resource
. thenComposeAsync ( x -> executeTask ( resource , taskData , tag , operation ) , executor ) // finally delete the resource child created under the controller ' s HostId
. whenCompleteAsync ( ( value , e ) -> taskMetadataStore . removeChild ( context . hostId , taggedResource , true ) . whenCompleteAsync ( ( innerValue , innerE ) -> { // ignore the result of removeChile operations , since it is an optimization
if ( e != null ) { result . completeExceptionally ( e ) ; } else { result . complete ( value ) ; } } , executor ) , executor ) ; return result ; |
public class NeuronIndexer2 { /** * Indexes neuron and properties at the sentence level . */
@ Override protected List < IndexRequestBuilder > toRequest ( JCas jCas , Client client , String indexName ) throws IOException { } } | List < IndexRequestBuilder > requests = newArrayList ( ) ; String pmId = getHeaderDocId ( jCas ) ; try { Collection < Neuron > neurons = select ( jCas , Neuron . class ) ; // UIMA CAS indexes so that we build them once for all sentences .
Map < AnnotationFS , Collection < AnnotationFS > > idxNeuronProperties = indexCovered ( jCas . getCas ( ) , getType ( jCas , Neuron . class ) , getType ( jCas , NeuronProperty . class ) ) ; Map < AnnotationFS , Collection < AnnotationFS > > idxNeurons = indexCovered ( jCas . getCas ( ) , getType ( jCas , Neuron . class ) , getType ( jCas , NeuronTrigger . class ) ) ; // only index abstracts that contain a neuron
if ( ! ( onlyIndexNeurons && neurons . isEmpty ( ) ) ) { final XContentBuilder doc = jsonBuilder ( ) . startObject ( ) ; doc . field ( FIELD_PUBMED_ID , pmId ) ; doc . field ( FIELD_SENTENCE_TEXT , jCas . getDocumentText ( ) ) ; // authors & date
try { List < String > authors = new ArrayList < > ( ) ; Header header = selectSingle ( jCas , Header . class ) ; for ( int i = 0 ; i < header . getAuthors ( ) . size ( ) ; i ++ ) { AuthorInfo a = header . getAuthors ( i ) ; authors . add ( a . getForeName ( ) + " " + a . getLastName ( ) ) ; } doc . field ( FIELD_AUTHORS , authors ) ; String pubDate = header . getCopyright ( ) ; // 1976-01-16
if ( pubDate . indexOf ( "-" ) > - 1 ) { doc . field ( FIELD_PUBLISHED_DATE , pubDate ) ; } } catch ( Exception e ) { // nope
LOG . debug ( "failed to index authors or date for {}: {}" , pmId , e . toString ( ) ) ; } // INDEX NEURONS
doc . startArray ( "neuron" ) ; for ( Neuron nwp : neurons ) { doc . startObject ( ) ; doc . field ( FIELD_NEURON_TEXT , nwp . getCoveredText ( ) ) ; doc . field ( FIELD_START , nwp . getBegin ( ) ) ; doc . field ( FIELD_END , nwp . getEnd ( ) ) ; // properties
doc . startArray ( "neuron_properties" ) ; // neuron ( trigger ) , handle as it were a property
NeuronTrigger n = ( ( NeuronTrigger ) ( idxNeurons . get ( nwp ) . iterator ( ) . next ( ) ) ) ; doc . startObject ( ) ; doc . field ( FIELD_NEURON_TYPE , getName ( n ) ) ; doc . field ( FIELD_PROPERTY_TEXT , n . getCoveredText ( ) ) ; doc . field ( FIELD_START , n . getBegin ( ) ) ; doc . field ( FIELD_END , n . getEnd ( ) ) ; doc . endObject ( ) ; // end ' n ' ( neuron trigger )
// every NeuronProperty
if ( idxNeuronProperties . containsKey ( nwp ) ) { for ( AnnotationFS np : idxNeuronProperties . get ( nwp ) ) { doc . startObject ( ) ; doc . field ( FIELD_NEURON_TYPE , getName ( np ) ) ; doc . field ( FIELD_PROPERTY_TEXT , np . getCoveredText ( ) ) ; doc . field ( FIELD_START , np . getBegin ( ) ) ; doc . field ( FIELD_END , np . getEnd ( ) ) ; try { String id = ( ( NeuronProperty ) np ) . getOntologyId ( ) ; if ( id != null ) doc . field ( FIELD_ONTOLOGY_ID , id ) ; } catch ( Exception e ) { // nope
} doc . endObject ( ) ; } } doc . endArray ( ) ; // end ' properties '
doc . endObject ( ) ; // end nwp
} doc . endArray ( ) ; // end ' neuron '
// INDEX SEPARATE PROPERTIES
doc . startArray ( "all_neuron_properties" ) ; for ( NeuronProperty np : select ( jCas , NeuronProperty . class ) ) { doc . startObject ( ) ; doc . field ( FIELD_PROPERTY_TEXT , np . getCoveredText ( ) ) ; doc . field ( FIELD_NEURON_TYPE , getName ( np ) ) ; doc . field ( FIELD_START , np . getBegin ( ) ) ; doc . field ( FIELD_END , np . getEnd ( ) ) ; try { String id = np . getOntologyId ( ) ; if ( id != null ) doc . field ( FIELD_ONTOLOGY_ID , id ) ; } catch ( Exception e ) { // nope
} doc . endObject ( ) ; } doc . endArray ( ) ; String uid = pmId ; // + " _ " + sentenceStart ; / / uid for Lucene
requests . add ( client . prepareIndex ( indexName , "abstract_" , uid ) . setSource ( doc ) ) ; LOG . trace ( doc . string ( ) ) ; } } catch ( Exception e ) { LOG . warn ( "could not index pmid" + pmId , e ) ; } return requests ; |
public class Matrices { /** * Creates a plus function that adds given { @ code value } to it ' s argument .
* @ param arg a value to be added to function ' s argument
* @ return a closure object that does { @ code _ + _ } */
public static MatrixFunction asPlusFunction ( final double arg ) { } } | return new MatrixFunction ( ) { @ Override public double evaluate ( int i , int j , double value ) { return value + arg ; } } ; |
public class cudaGraphicsCubeFace { /** * Returns the String identifying the given cudaGraphicsCubeFace
* @ param n The cudaGraphicsCubeFace
* @ return The String identifying the given cudaGraphicsCubeFace */
public static String stringFor ( int n ) { } } | switch ( n ) { case cudaGraphicsCubeFacePositiveX : return "cudaGraphicsCubeFacePositiveX" ; case cudaGraphicsCubeFaceNegativeX : return "cudaGraphicsCubeFaceNegativeX" ; case cudaGraphicsCubeFacePositiveY : return "cudaGraphicsCubeFacePositiveY" ; case cudaGraphicsCubeFaceNegativeY : return "cudaGraphicsCubeFaceNegativeY" ; case cudaGraphicsCubeFacePositiveZ : return "cudaGraphicsCubeFacePositiveZ" ; case cudaGraphicsCubeFaceNegativeZ : return "cudaGraphicsCubeFaceNegativeZ" ; } return "INVALID cudaGraphicsCubeFace: " + n ; |
public class LocalQPConsumerKeyGroup { /** * Set the state of a ConsumerKeyGroup to not ready and remove it from the ready list .
* or decrement the specific ready counter ( if appropriate ) . Note that there is no need to
* increment the specific consumer list version number .
* This method only has any effect if the state was originally ready . */
public void notReady ( ) { } } | if ( TraceComponent . isAnyTracingEnabled ( ) && tc . isEntryEnabled ( ) ) SibTr . entry ( tc , "notReady" ) ; // get the ready consumer list lock
synchronized ( consumerDispatcher . getDestination ( ) . getReadyConsumerPointLock ( ) ) { if ( ready ) { ready = false ; consumerDispatcher . removeReadyConsumer ( this , specificReady ) ; } } if ( TraceComponent . isAnyTracingEnabled ( ) && tc . isEntryEnabled ( ) ) SibTr . exit ( tc , "notReady" ) ; |
public class AbstractColorPickerPreference { /** * Obtains all attributes from a specific attribute set .
* @ param attributeSet
* The attribute set , the attributes should be obtained from , as an instance of the type
* { @ link AttributeSet } or null , if no attributes should be obtained
* @ param defaultStyle
* The default style to apply to this preference . If 0 , no style will be applied ( beyond
* what is included in the theme ) . This may either be an attribute resource , whose value
* will be retrieved from the current theme , or an explicit style resource
* @ param defaultStyleResource
* A resource identifier of a style resource that supplies default values for the
* preference , used only if the default style is 0 or can not be found in the theme . Can
* be 0 to not look for defaults */
private void obtainStyledAttributes ( @ Nullable final AttributeSet attributeSet , @ AttrRes final int defaultStyle , @ StyleRes final int defaultStyleResource ) { } } | TypedArray typedArray = getContext ( ) . obtainStyledAttributes ( attributeSet , R . styleable . AbstractColorPickerPreference , defaultStyle , defaultStyleResource ) ; try { obtainShowPreview ( typedArray ) ; obtainPreviewSize ( typedArray ) ; obtainPreviewShape ( typedArray ) ; obtainPreviewBorderWidth ( typedArray ) ; obtainPreviewBorderColor ( typedArray ) ; obtainPreviewBackground ( typedArray ) ; obtainColorFormat ( typedArray ) ; } finally { typedArray . recycle ( ) ; } |
public class PaymentChannelServerState { /** * Stores this channel ' s state in the wallet as a part of a { @ link StoredPaymentChannelServerStates } wallet
* extension and keeps it up - to - date each time payment is incremented . This will be automatically removed when
* a call to { @ link PaymentChannelV1ServerState # close ( ) } completes successfully . A channel may only be stored after it
* has fully opened ( ie state = = State . READY ) .
* @ param connectedHandler Optional { @ link PaymentChannelServer } object that manages this object . This will
* set the appropriate pointer in the newly created { @ link StoredServerChannel } before it is
* committed to wallet . If set , closing the state object will propagate the close to the
* handler which can then do a TCP disconnect . */
public synchronized void storeChannelInWallet ( @ Nullable PaymentChannelServer connectedHandler ) { } } | stateMachine . checkState ( State . READY ) ; if ( storedServerChannel != null ) return ; log . info ( "Storing state with contract hash {}." , getContract ( ) . getTxId ( ) ) ; StoredPaymentChannelServerStates channels = ( StoredPaymentChannelServerStates ) wallet . addOrGetExistingExtension ( new StoredPaymentChannelServerStates ( wallet , broadcaster ) ) ; storedServerChannel = new StoredServerChannel ( this , getMajorVersion ( ) , getContract ( ) , getClientOutput ( ) , getExpiryTime ( ) , serverKey , getClientKey ( ) , bestValueToMe , bestValueSignature ) ; if ( connectedHandler != null ) checkState ( storedServerChannel . setConnectedHandler ( connectedHandler , false ) == connectedHandler ) ; channels . putChannel ( storedServerChannel ) ; |
public class TableJson { /** * Start a move to an existing mirror that may or may not have once been primary . */
Delta newMoveRestart ( Storage src , Storage dest ) { } } | // If the destination used to be the initial primary for the group , it ' s consistent and ready to promote
// but lacking the MIRROR _ CONSISTENT marker attribute which is the prereq for promotion . Add one .
Delta consistentMarker = dest . isConsistent ( ) ? Deltas . conditional ( Conditions . isUndefined ( ) , Deltas . literal ( now ( ) ) ) : Deltas . noop ( ) ; return Deltas . mapBuilder ( ) . update ( STORAGE . key ( ) , Deltas . mapBuilder ( ) . update ( src . getUuidString ( ) , Deltas . mapBuilder ( ) . put ( Storage . MOVE_TO . key ( ) , dest . getUuidString ( ) ) . build ( ) ) . update ( dest . getUuidString ( ) , Deltas . mapBuilder ( ) // Clean slate : clear ' moveTo ' plus all markers for promotion states and later .
. update ( StorageState . MIRROR_CONSISTENT . getMarkerAttribute ( ) . key ( ) , consistentMarker ) . remove ( Storage . MOVE_TO . key ( ) ) . remove ( Storage . PROMOTION_ID . key ( ) ) . remove ( StorageState . PRIMARY . getMarkerAttribute ( ) . key ( ) ) . remove ( StorageState . MIRROR_EXPIRING . getMarkerAttribute ( ) . key ( ) ) . remove ( StorageState . MIRROR_EXPIRED . getMarkerAttribute ( ) . key ( ) ) . build ( ) ) . build ( ) ) . build ( ) ; |
public class CmsGalleryFactory { /** * Creates a new gallery dialog . < p >
* @ param galleryHandler the gallery handler
* @ param data the gallery data
* @ return the gallery dialog instance */
@ SuppressWarnings ( "unused" ) public static CmsGalleryDialog createDialog ( I_CmsGalleryHandler galleryHandler , CmsGalleryDataBean data ) { } } | CmsGalleryDialog galleryDialog = new CmsGalleryDialog ( galleryHandler ) ; new CmsGalleryController ( new CmsGalleryControllerHandler ( galleryDialog ) , data , null ) ; return galleryDialog ; |
public class CertificatePoliciesExtension { /** * Encode this extension value . */
private void encodeThis ( ) throws IOException { } } | if ( certPolicies == null || certPolicies . isEmpty ( ) ) { this . extensionValue = null ; } else { DerOutputStream os = new DerOutputStream ( ) ; DerOutputStream tmp = new DerOutputStream ( ) ; for ( PolicyInformation info : certPolicies ) { info . encode ( tmp ) ; } os . write ( DerValue . tag_Sequence , tmp ) ; this . extensionValue = os . toByteArray ( ) ; } |
public class TreetankStorageModule { /** * { @ inheritDoc } */
public void close ( ) throws IOException { } } | try { mRtx . commit ( ) ; mRtx . close ( ) ; } catch ( TTException exc ) { throw new IOException ( exc ) ; } |
public class JmxValueFormatterImpl { /** * Returns a formatted representation for the value .
* @ param value Value to format .
* @ return a formatted representation for the value . */
@ Override public Object format ( Object value ) { } } | if ( value == null ) { return nullStringValue ; } if ( value . getClass ( ) . isArray ( ) ) { return asList ( value ) ; } else if ( value instanceof Date ) { Date date = ( Date ) value ; if ( date . getTime ( ) % ONE_DAY_IN_MILLIS == 0 ) { return new SimpleDateFormat ( datePattern ) . format ( date ) ; } else { return new SimpleDateFormat ( dateTimePattern ) . format ( date ) ; } } else if ( value instanceof CompositeData ) { return toMap ( ( CompositeData ) value ) ; } else if ( value instanceof Element ) { return parseXmlElement ( ( Element ) value ) ; } return value ; |
public class StitchingFromMotion2D { /** * Resizes the stitch image . If no transform is provided then the old stitch region is simply
* places on top of the new one and copied . Pixels which do not exist in the old image are filled with zero .
* @ param widthStitch The new width of the stitch image .
* @ param heightStitch The new height of the stitch image .
* @ param newToOldStitch ( Optional ) Transform from new stitch image pixels to old stick pixels . Can be null . */
public void resizeStitchImage ( int widthStitch , int heightStitch , IT newToOldStitch ) { } } | // copy the old image into the new one
workImage . reshape ( widthStitch , heightStitch ) ; GImageMiscOps . fill ( workImage , 0 ) ; if ( newToOldStitch != null ) { PixelTransform < Point2D_F32 > newToOld = converter . convertPixel ( newToOldStitch , null ) ; distorter . setModel ( newToOld ) ; distorter . apply ( stitchedImage , workImage ) ; // update the transforms
IT tmp = ( IT ) worldToCurr . createInstance ( ) ; newToOldStitch . concat ( worldToInit , tmp ) ; worldToInit . set ( tmp ) ; computeCurrToInit_PixelTran ( ) ; } else { int overlapWidth = Math . min ( widthStitch , stitchedImage . width ) ; int overlapHeight = Math . min ( heightStitch , stitchedImage . height ) ; GImageMiscOps . copy ( 0 , 0 , 0 , 0 , overlapWidth , overlapHeight , stitchedImage , workImage ) ; } stitchedImage . reshape ( widthStitch , heightStitch ) ; I tmp = stitchedImage ; stitchedImage = workImage ; workImage = tmp ; this . widthStitch = widthStitch ; this . heightStitch = heightStitch ; |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.