signature stringlengths 43 39.1k | implementation stringlengths 0 450k |
|---|---|
public class UnicodeFont { /** * Utility to create a Java font for a TTF file reference
* @ param ttfFileRef The file system or classpath location of the TrueTypeFont file .
* @ return The font created
* @ throws SlickException Indicates a failure to locate or load the font into Java ' s font
* system . */
private static Font createFont ( String ttfFileRef ) throws SlickException { } } | try { return Font . createFont ( Font . TRUETYPE_FONT , ResourceLoader . getResourceAsStream ( ttfFileRef ) ) ; } catch ( FontFormatException ex ) { throw new SlickException ( "Invalid font: " + ttfFileRef , ex ) ; } catch ( IOException ex ) { throw new SlickException ( "Error reading font: " + ttfFileRef , ex ) ; } |
public class DefaultSelendroidDriver { /** * ( non - Javadoc )
* @ see org . openqa . selenium . android . server . AndroidDriver # getSessionCapabilities ( java . lang . String ) */
@ Override public JSONObject getSessionCapabilities ( String sessionId ) { } } | SelendroidLogger . info ( "session: " + sessionId ) ; JSONObject copy ; try { JSONObject capabilities = session . getCapabilities ( ) ; if ( capabilities != null ) { copy = new JSONObject ( capabilities . toString ( ) ) ; } else { copy = new JSONObject ( ) ; } copy . put ( TAKES_SCREENSHOT , true ) ; copy . put ( BROWSER_NAME , "selendroid" ) ; copy . put ( "automationName" , "selendroid" ) ; copy . put ( "platformName" , "android" ) ; copy . put ( "platformVersion" , serverInstrumentation . getOsVersion ( ) ) ; copy . put ( ROTATABLE , true ) ; copy . put ( PLATFORM , "android" ) ; copy . put ( SUPPORTS_ALERTS , true ) ; copy . put ( SUPPORTS_JAVASCRIPT , true ) ; copy . put ( SUPPORTS_NETWORK_CONNECTION , true ) ; copy . put ( "version" , serverInstrumentation . getServerVersion ( ) ) ; copy . put ( ACCEPT_SSL_CERTS , true ) ; SelendroidLogger . info ( "capabilities: " + copy ) ; return copy ; } catch ( JSONException e ) { throw new SelendroidException ( e ) ; } |
public class CmsXmlUtils { /** * Splits a content value path into its components , ignoring leading or trailing slashes . < p >
* Note : this does not work for XPaths in general , only for the paths used to identify values in OpenCms contents . < p >
* @ param xpath the xpath
* @ return the path components */
public static List < String > splitXpath ( String xpath ) { } } | return Arrays . stream ( xpath . split ( "/" ) ) . filter ( s -> ! s . isEmpty ( ) ) . collect ( Collectors . toList ( ) ) ; |
public class ZonedDateTime { /** * Obtains an instance of { @ code ZonedDateTime } from a local date - time .
* This creates a zoned date - time matching the input local date - time as closely as possible .
* Time - zone rules , such as daylight savings , mean that not every local date - time
* is valid for the specified zone , thus the local date - time may be adjusted .
* The local date - time is resolved to a single instant on the time - line .
* This is achieved by finding a valid offset from UTC / Greenwich for the local
* date - time as defined by the { @ link ZoneRules rules } of the zone ID .
* In most cases , there is only one valid offset for a local date - time .
* In the case of an overlap , when clocks are set back , there are two valid offsets .
* This method uses the earlier offset typically corresponding to " summer " .
* In the case of a gap , when clocks jump forward , there is no valid offset .
* Instead , the local date - time is adjusted to be later by the length of the gap .
* For a typical one hour daylight savings change , the local date - time will be
* moved one hour later into the offset typically corresponding to " summer " .
* @ param localDateTime the local date - time , not null
* @ param zone the time - zone , not null
* @ return the zoned date - time , not null */
public static ZonedDateTime of ( LocalDateTime localDateTime , ZoneId zone ) { } } | return ofLocal ( localDateTime , zone , null ) ; |
public class Base64EncodedSignerWithChoosersByAliasImpl { /** * Signs a message .
* @ param keyStoreChooser the keystore chooser
* @ param privateKeyChooserByAlias the private key chooser
* @ param message the message to sign
* @ return the base64 encoded signature */
public String sign ( KeyStoreChooser keyStoreChooser , PrivateKeyChooserByAlias privateKeyChooserByAlias , String message ) { } } | Base64EncodedSigner signer = cache . get ( cacheKey ( keyStoreChooser , privateKeyChooserByAlias ) ) ; if ( signer != null ) { return signer . sign ( message ) ; } Base64EncodedSignerImpl signerImpl = new Base64EncodedSignerImpl ( ) ; signerImpl . setAlgorithm ( algorithm ) ; signerImpl . setCharsetName ( charsetName ) ; signerImpl . setProvider ( provider ) ; PrivateKey privateKey = privateKeyRegistryByAlias . get ( keyStoreChooser , privateKeyChooserByAlias ) ; if ( privateKey == null ) { throw new SignatureException ( "private key not found: keyStoreName=" + keyStoreChooser . getKeyStoreName ( ) + ", alias=" + privateKeyChooserByAlias . getAlias ( ) ) ; } signerImpl . setPrivateKey ( privateKey ) ; cache . put ( cacheKey ( keyStoreChooser , privateKeyChooserByAlias ) , signerImpl ) ; return signerImpl . sign ( message ) ; |
public class SegmentedJournal { /** * Loads all segments from disk .
* @ return A collection of segments for the log . */
protected Collection < JournalSegment < E > > loadSegments ( ) { } } | // Ensure log directories are created .
directory . mkdirs ( ) ; TreeMap < Long , JournalSegment < E > > segments = new TreeMap < > ( ) ; // Iterate through all files in the log directory .
for ( File file : directory . listFiles ( File :: isFile ) ) { // If the file looks like a segment file , attempt to load the segment .
if ( JournalSegmentFile . isSegmentFile ( name , file ) ) { JournalSegmentFile segmentFile = new JournalSegmentFile ( file ) ; ByteBuffer buffer = ByteBuffer . allocate ( JournalSegmentDescriptor . BYTES ) ; try ( FileChannel channel = openChannel ( file ) ) { channel . read ( buffer ) ; buffer . flip ( ) ; } catch ( IOException e ) { throw new StorageException ( e ) ; } JournalSegmentDescriptor descriptor = new JournalSegmentDescriptor ( buffer ) ; // Load the segment .
JournalSegment < E > segment = loadSegment ( descriptor . id ( ) ) ; // Add the segment to the segments list .
log . debug ( "Found segment: {} ({})" , segment . descriptor ( ) . id ( ) , segmentFile . file ( ) . getName ( ) ) ; segments . put ( segment . index ( ) , segment ) ; } } // Verify that all the segments in the log align with one another .
JournalSegment < E > previousSegment = null ; boolean corrupted = false ; Iterator < Map . Entry < Long , JournalSegment < E > > > iterator = segments . entrySet ( ) . iterator ( ) ; while ( iterator . hasNext ( ) ) { JournalSegment < E > segment = iterator . next ( ) . getValue ( ) ; if ( previousSegment != null && previousSegment . lastIndex ( ) != segment . index ( ) - 1 ) { log . warn ( "Journal is inconsistent. {} is not aligned with prior segment {}" , segment . file ( ) . file ( ) , previousSegment . file ( ) . file ( ) ) ; corrupted = true ; } if ( corrupted ) { segment . close ( ) ; segment . delete ( ) ; iterator . remove ( ) ; } previousSegment = segment ; } return segments . values ( ) ; |
public class ParserDML { /** * Retrieves an INSERT Statement from this parse context . */
StatementDMQL compileInsertStatement ( RangeVariable [ ] outerRanges ) { } } | read ( ) ; readThis ( Tokens . INTO ) ; boolean [ ] columnCheckList ; int [ ] columnMap ; int colCount ; Table table = readTableName ( ) ; boolean overridingUser = false ; boolean overridingSystem = false ; int enforcedDefaultIndex = table . getIdentityColumnIndex ( ) ; boolean assignsToIdentity = false ; columnCheckList = null ; columnMap = table . getColumnMap ( ) ; colCount = table . getColumnCount ( ) ; int position = getPosition ( ) ; if ( ! table . isInsertable ( ) ) { throw Error . error ( ErrorCode . X_42545 ) ; } Table baseTable = table . getBaseTable ( ) ; switch ( token . tokenType ) { case Tokens . DEFAULT : { read ( ) ; readThis ( Tokens . VALUES ) ; Expression insertExpression = new Expression ( OpTypes . ROW , new Expression [ ] { } ) ; insertExpression = new Expression ( OpTypes . TABLE , new Expression [ ] { insertExpression } ) ; columnCheckList = table . getNewColumnCheckList ( ) ; for ( int i = 0 ; i < table . colDefaults . length ; i ++ ) { if ( table . colDefaults [ i ] == null && table . identityColumn != i ) { throw Error . error ( ErrorCode . X_42544 ) ; } } StatementDMQL cs = new StatementInsert ( session , table , columnMap , insertExpression , columnCheckList , compileContext ) ; return cs ; } case Tokens . OPENBRACKET : { int brackets = readOpenBrackets ( ) ; if ( brackets == 1 ) { boolean isQuery = false ; switch ( token . tokenType ) { case Tokens . WITH : case Tokens . SELECT : case Tokens . TABLE : { rewind ( position ) ; isQuery = true ; break ; } default : } if ( isQuery ) { break ; } OrderedHashSet columnNames = new OrderedHashSet ( ) ; readSimpleColumnNames ( columnNames , table ) ; readThis ( Tokens . CLOSEBRACKET ) ; colCount = columnNames . size ( ) ; columnMap = table . getColumnIndexes ( columnNames ) ; if ( token . tokenType != Tokens . VALUES && token . tokenType != Tokens . OVERRIDING ) { break ; } // $ FALL - THROUGH $
} else { rewind ( position ) ; break ; } } // $ FALL - THROUGH $
case Tokens . OVERRIDING : { if ( token . tokenType == Tokens . OVERRIDING ) { read ( ) ; if ( token . tokenType == Tokens . USER ) { read ( ) ; overridingUser = true ; } else if ( token . tokenType == Tokens . SYSTEM ) { read ( ) ; overridingSystem = true ; } else { unexpectedToken ( ) ; } } if ( token . tokenType != Tokens . VALUES ) { break ; } } // $ FALL - THROUGH $
case Tokens . VALUES : { read ( ) ; columnCheckList = table . getColumnCheckList ( columnMap ) ; Expression insertExpressions = XreadContextuallyTypedTable ( colCount ) ; HsqlList unresolved = insertExpressions . resolveColumnReferences ( outerRanges , null ) ; ExpressionColumn . checkColumnsResolved ( unresolved ) ; insertExpressions . resolveTypes ( session , null ) ; setParameterTypes ( insertExpressions , table , columnMap ) ; if ( table != baseTable ) { int [ ] baseColumnMap = table . getBaseTableColumnMap ( ) ; int [ ] newColumnMap = new int [ columnMap . length ] ; ArrayUtil . projectRow ( baseColumnMap , columnMap , newColumnMap ) ; columnMap = newColumnMap ; } Expression [ ] rowList = insertExpressions . nodes ; for ( int j = 0 ; j < rowList . length ; j ++ ) { Expression [ ] rowArgs = rowList [ j ] . nodes ; for ( int i = 0 ; i < rowArgs . length ; i ++ ) { Expression e = rowArgs [ i ] ; if ( enforcedDefaultIndex == columnMap [ i ] ) { assignsToIdentity = true ; if ( e . getType ( ) != OpTypes . DEFAULT ) { if ( table . identitySequence . isAlways ( ) ) { if ( ! overridingUser && ! overridingSystem ) { throw Error . error ( ErrorCode . X_42543 ) ; } } else { /* if ( overridingUser ) {
throw Trace . error (
Trace . SQL _ DEFAULT _ CLAUSE _ REQUITED ) ; */
} } } if ( e . isParam ( ) ) { e . setAttributesAsColumn ( table . getColumn ( columnMap [ i ] ) , true ) ; } else if ( e . getType ( ) == OpTypes . DEFAULT ) { if ( table . colDefaults [ i ] == null && table . identityColumn != columnMap [ i ] ) { throw Error . error ( ErrorCode . X_42544 ) ; } } } } if ( ! assignsToIdentity && ( overridingUser || overridingSystem ) ) { unexpectedTokenRequire ( Tokens . T_OVERRIDING ) ; } StatementDMQL cs = new StatementInsert ( session , table , columnMap , insertExpressions , columnCheckList , compileContext ) ; return cs ; } case Tokens . WITH : case Tokens . SELECT : case Tokens . TABLE : { break ; } default : { throw unexpectedToken ( ) ; } } columnCheckList = table . getColumnCheckList ( columnMap ) ; QueryExpression queryExpression = XreadQueryExpression ( ) ; queryExpression . setAsTopLevel ( ) ; queryExpression . resolve ( session , outerRanges ) ; if ( colCount != queryExpression . getColumnCount ( ) ) { throw Error . error ( ErrorCode . X_42546 ) ; } if ( table != baseTable ) { int [ ] baseColumnMap = table . getBaseTableColumnMap ( ) ; int [ ] newColumnMap = new int [ columnMap . length ] ; ArrayUtil . projectRow ( baseColumnMap , columnMap , newColumnMap ) ; columnMap = newColumnMap ; } if ( enforcedDefaultIndex != - 1 && ArrayUtil . find ( columnMap , enforcedDefaultIndex ) > - 1 ) { if ( table . identitySequence . isAlways ( ) ) { if ( ! overridingUser && ! overridingSystem ) { throw Error . error ( ErrorCode . X_42543 ) ; } } else { /* if ( overridingUser ) {
throw Trace . error (
Trace . SQL _ DEFAULT _ CLAUSE _ REQUITED ) ; */
} } else if ( overridingUser || overridingSystem ) { unexpectedTokenRequire ( Tokens . T_OVERRIDING ) ; } StatementDMQL cs = new StatementInsert ( session , table , columnMap , columnCheckList , queryExpression , compileContext ) ; return cs ; |
public class AbstractSshAuthConfigurator { /** * Iterates through the known hosts ( host key repository ) .
* If one of the know hosts matches the current host we ' re trying to connect too ,
* it configures the session to use that key ' s algorithm
* ( thus avoiding conflicts between JSch wanting RSA and the key being ECDSA ) */
protected void setHostKeyType ( OpenSshConfig . Host host , Session session ) { } } | HostKey [ ] hostKeys = session . getHostKeyRepository ( ) . getHostKey ( ) ; for ( HostKey hostKey : hostKeys ) { if ( hostKey . getHost ( ) . contains ( host . getHostName ( ) ) ) { session . setConfig ( KEY_TYPE_CONFIG , hostKey . getType ( ) ) ; } } |
public class LongDoubleUnsortedVector { /** * NOTE : this is much less efficient than calls to add ( ) . */
public double set ( long index , double value ) { } } | compact ( ) ; int i = findIndexMatching ( index ) ; if ( i < 0 ) { add ( index , value ) ; compacted = false ; return 0 ; } else { double old = vals [ i ] ; vals [ i ] = value ; return old ; } |
public class BaseBundleActivator { /** * Get the properties .
* @ return the properties . */
public String getProperty ( String key ) { } } | String servicePid = this . getServicePid ( ) ; String value = null ; if ( ! key . contains ( "." ) ) { value = context . getProperty ( servicePid + '.' + key ) ; if ( ( value == null ) && ( properties != null ) ) value = properties . get ( servicePid + '.' + key ) ; } if ( value == null ) value = context . getProperty ( key ) ; if ( ( value == null ) && ( properties != null ) ) value = properties . get ( key ) ; return value ; |
public class AmazonMTurkClient { /** * The < code > GetFileUploadURL < / code > operation generates and returns a temporary URL . You use the temporary URL to
* retrieve a file uploaded by a Worker as an answer to a FileUploadAnswer question for a HIT . The temporary URL is
* generated the instant the GetFileUploadURL operation is called , and is valid for 60 seconds . You can get a
* temporary file upload URL any time until the HIT is disposed . After the HIT is disposed , any uploaded files are
* deleted , and cannot be retrieved . Pending Deprecation on December 12 , 2017 . The Answer Specification structure
* will no longer support the < code > FileUploadAnswer < / code > element to be used for the QuestionForm data structure .
* Instead , we recommend that Requesters who want to create HITs asking Workers to upload files to use Amazon S3.
* @ param getFileUploadURLRequest
* @ return Result of the GetFileUploadURL operation returned by the service .
* @ throws ServiceException
* Amazon Mechanical Turk is temporarily unable to process your request . Try your call again .
* @ throws RequestErrorException
* Your request is invalid .
* @ sample AmazonMTurk . GetFileUploadURL
* @ see < a href = " http : / / docs . aws . amazon . com / goto / WebAPI / mturk - requester - 2017-01-17 / GetFileUploadURL "
* target = " _ top " > AWS API Documentation < / a > */
@ Override public GetFileUploadURLResult getFileUploadURL ( GetFileUploadURLRequest request ) { } } | request = beforeClientExecution ( request ) ; return executeGetFileUploadURL ( request ) ; |
public class HtmlDocletWriter { /** * Add the inline deprecated comment .
* @ param e the Element for which the inline deprecated comment will be added
* @ param tag the inline tag to be added
* @ param htmltree the content tree to which the comment will be added */
public void addInlineDeprecatedComment ( Element e , DocTree tag , Content htmltree ) { } } | CommentHelper ch = utils . getCommentHelper ( e ) ; addCommentTags ( e , ch . getBody ( configuration , tag ) , true , false , htmltree ) ; |
public class RGraph { /** * Projects a RGraph bitset on the source graph G2.
* @ param set RGraph BitSet to project
* @ return The associate BitSet in G2 */
public BitSet projectG2 ( BitSet set ) { } } | BitSet projection = new BitSet ( secondGraphSize ) ; RNode xNode = null ; for ( int x = set . nextSetBit ( 0 ) ; x >= 0 ; x = set . nextSetBit ( x + 1 ) ) { xNode = ( RNode ) graph . get ( x ) ; projection . set ( xNode . rMap . id2 ) ; } return projection ; |
public class Element { /** * Check that the element has no text content . */
void assertNoText ( ) throws InvalidInputException { } } | for ( Element child : this . getChildren ( ) ) if ( child instanceof TextNode && StringUtils . isNotBlank ( ( ( TextNode ) child ) . getText ( ) ) ) { throw new InvalidInputException ( "Element \"" + this . getMessageMLTag ( ) + "\" may not have text content" ) ; } |
public class ServletRedirectTask { /** * { @ inheritDoc } */
public Formula getFormula ( ) { } } | Reagent [ ] reagents = new Reagent [ ] { RESOURCE , SOURCE } ; final Formula rslt = new SimpleFormula ( ServletRedirectTask . class , reagents ) ; return rslt ; |
public class LdapSpnegoKnownClientSystemsFilterAction { /** * Create and open a connection to ldap
* via the given config and provider .
* @ return the connection
* @ throws LdapException the ldap exception */
protected Connection createConnection ( ) throws LdapException { } } | LOGGER . debug ( "Establishing a connection..." ) ; val connection = this . connectionFactory . getConnection ( ) ; connection . open ( ) ; return connection ; |
public class OptimizerNode { @ Override public Iterator < OptimizerNode > getPredecessors ( ) { } } | List < OptimizerNode > allPredecessors = new ArrayList < OptimizerNode > ( ) ; for ( Iterator < PactConnection > inputs = getIncomingConnections ( ) . iterator ( ) ; inputs . hasNext ( ) ; ) { allPredecessors . add ( inputs . next ( ) . getSource ( ) ) ; } for ( PactConnection conn : getBroadcastConnections ( ) ) { allPredecessors . add ( conn . getSource ( ) ) ; } return allPredecessors . iterator ( ) ; |
public class PageFlowActionServlet { /** * Last chance to handle an unhandled action URI .
* @ return < code > true < / code > if this method handled it ( by forwarding somewhere or writing to the response ) . */
protected boolean processUnhandledAction ( HttpServletRequest request , HttpServletResponse response , String uri ) throws IOException , ServletException { } } | // First check to see if we ' re already in a forwarded fallback request . If so , just bail .
PageFlowRequestWrapper rw = PageFlowRequestWrapper . get ( request ) ; if ( rw . getOriginalServletPath ( ) != null ) return false ; SharedFlowController sharedFlowToTry = null ; String uriBaseName = ServletUtils . getBaseName ( uri ) ; int firstDot = uriBaseName . indexOf ( '.' ) ; int lastDot = uriBaseName . lastIndexOf ( '.' ) ; if ( firstDot != - 1 && firstDot != lastDot ) { String sharedFlowName = uriBaseName . substring ( 0 , firstDot ) ; try { RequestContext rc = new RequestContext ( request , response ) ; Map defaultSharedFlows = FlowControllerFactory . get ( getServletContext ( ) ) . getDefaultSharedFlows ( rc ) ; if ( defaultSharedFlows != null ) { sharedFlowToTry = ( SharedFlowController ) defaultSharedFlows . get ( sharedFlowName ) ; uriBaseName = uriBaseName . substring ( firstDot + 1 ) ; } } catch ( ClassNotFoundException e ) { ServletUtils . throwServletException ( e ) ; } catch ( InstantiationException e ) { ServletUtils . throwServletException ( e ) ; } catch ( IllegalAccessException e ) { ServletUtils . throwServletException ( e ) ; } } else { sharedFlowToTry = FlowControllerFactory . getGlobalApp ( request , response , getServletContext ( ) ) ; } // If we couldn ' t find an appropriate module , try raising the action on the ( deprecated ) Global . app .
if ( sharedFlowToTry != null ) { InternalStringBuilder sfActionURI = new InternalStringBuilder ( sharedFlowToTry . getModulePath ( ) ) ; sfActionURI . append ( '/' ) ; sfActionURI . append ( uriBaseName ) ; rw . setOriginalServletPath ( uri ) ; ForwardRedirectHandler frh = _handlers . getForwardRedirectHandler ( ) ; FlowControllerHandlerContext context = new FlowControllerHandlerContext ( request , response , null ) ; frh . forward ( context , sfActionURI . toString ( ) ) ; return true ; } return false ; |
public class Exceptions { /** * Returns a consumer which will unwrap and rethrow any throwables caught in { @ code consumer } .
* @ param consumer the consumer
* @ param < T > the input type
* @ param < E > the exception type
* @ return a consumer */
public static < T , E extends Throwable > @ NonNull Consumer < T > unwrappingRethrowConsumer ( final @ NonNull ThrowingConsumer < T , E > consumer ) { } } | return input -> { try { consumer . throwingAccept ( input ) ; } catch ( final Throwable t ) { throw rethrow ( unwrap ( t ) ) ; } } ; |
public class TableProxy { /** * Update the current record .
* @ param The data to update .
* @ exception Exception File exception . */
public void set ( Object data , int iOpenMode ) throws DBException , RemoteException { } } | BaseTransport transport = this . createProxyTransport ( SET ) ; transport . addParam ( DATA , data ) ; transport . addParam ( OPEN_MODE , iOpenMode ) ; Object strReturn = transport . sendMessageAndGetReply ( ) ; Object objReturn = transport . convertReturnObject ( strReturn ) ; this . checkDBException ( objReturn ) ; |
public class CaseArgumentAnalyser { /** * Returns a list with argument words that are not equal in all cases */
List < List < Word > > getDifferentArguments ( List < List < Word > > argumentWords ) { } } | List < List < Word > > result = Lists . newArrayList ( ) ; for ( int i = 0 ; i < argumentWords . size ( ) ; i ++ ) { result . add ( Lists . < Word > newArrayList ( ) ) ; } int nWords = argumentWords . get ( 0 ) . size ( ) ; for ( int iWord = 0 ; iWord < nWords ; iWord ++ ) { Word wordOfFirstCase = argumentWords . get ( 0 ) . get ( iWord ) ; // data tables have equal here , otherwise
// the cases would be structurally different
if ( wordOfFirstCase . isDataTable ( ) ) { continue ; } boolean different = false ; for ( int iCase = 1 ; iCase < argumentWords . size ( ) ; iCase ++ ) { Word wordOfCase = argumentWords . get ( iCase ) . get ( iWord ) ; if ( ! wordOfCase . getFormattedValue ( ) . equals ( wordOfFirstCase . getFormattedValue ( ) ) ) { different = true ; break ; } } if ( different ) { for ( int iCase = 0 ; iCase < argumentWords . size ( ) ; iCase ++ ) { result . get ( iCase ) . add ( argumentWords . get ( iCase ) . get ( iWord ) ) ; } } } return result ; |
public class IfcBSplineCurveWithKnotsImpl { /** * < ! - - begin - user - doc - - >
* < ! - - end - user - doc - - >
* @ generated */
@ SuppressWarnings ( "unchecked" ) @ Override public EList < Double > getKnots ( ) { } } | return ( EList < Double > ) eGet ( Ifc4Package . Literals . IFC_BSPLINE_CURVE_WITH_KNOTS__KNOTS , true ) ; |
public class ReloadableType { /** * TODO [ perf ] performance sucks as we walk multiple times ! */
public FieldReaderWriter locateField ( String name ) { } } | if ( hasFieldChangedInHierarchy ( name ) ) { return walk ( name , getLatestTypeDescriptor ( ) ) ; } else { return getFieldInHierarchy ( name ) ; } |
public class DefaultTokenManager { /** * Submits a token refresh task
* @ return void */
protected void submitRefreshTask ( ) { } } | TokenRefreshTask tokenRefreshTask = new TokenRefreshTask ( iamEndpoint , this ) ; executor . execute ( tokenRefreshTask ) ; log . debug ( "Submitted token refresh task" ) ; |
public class DOMDifferenceEngine { /** * Dispatches to the node type specific comparison if one is
* defined for the given combination of nodes . */
private ComparisonState nodeTypeSpecificComparison ( Node control , XPathContext controlContext , Node test , XPathContext testContext ) { } } | switch ( control . getNodeType ( ) ) { case Node . CDATA_SECTION_NODE : case Node . COMMENT_NODE : case Node . TEXT_NODE : if ( test instanceof CharacterData ) { return compareCharacterData ( ( CharacterData ) control , controlContext , ( CharacterData ) test , testContext ) ; } break ; case Node . DOCUMENT_NODE : if ( test instanceof Document ) { return compareDocuments ( ( Document ) control , controlContext , ( Document ) test , testContext ) ; } break ; case Node . ELEMENT_NODE : if ( test instanceof Element ) { return compareElements ( ( Element ) control , controlContext , ( Element ) test , testContext ) ; } break ; case Node . PROCESSING_INSTRUCTION_NODE : if ( test instanceof ProcessingInstruction ) { return compareProcessingInstructions ( ( ProcessingInstruction ) control , controlContext , ( ProcessingInstruction ) test , testContext ) ; } break ; case Node . DOCUMENT_TYPE_NODE : if ( test instanceof DocumentType ) { return compareDocTypes ( ( DocumentType ) control , controlContext , ( DocumentType ) test , testContext ) ; } break ; case Node . ATTRIBUTE_NODE : if ( test instanceof Attr ) { return compareAttributes ( ( Attr ) control , controlContext , ( Attr ) test , testContext ) ; } break ; default : break ; } return new OngoingComparisonState ( ) ; |
public class TextBox { /** * Updates the text content of the { @ code TextBox } to the supplied string .
* @ param text New text to assign to the { @ code TextBox }
* @ return Itself */
public synchronized TextBox setText ( String text ) { } } | String [ ] split = text . split ( "\n" ) ; if ( split . length == 0 ) { split = new String [ ] { "" } ; } lines . clear ( ) ; longestRow = 1 ; for ( String line : split ) { addLine ( line ) ; } if ( caretPosition . getRow ( ) > lines . size ( ) - 1 ) { caretPosition = caretPosition . withRow ( lines . size ( ) - 1 ) ; } if ( caretPosition . getColumn ( ) > lines . get ( caretPosition . getRow ( ) ) . length ( ) ) { caretPosition = caretPosition . withColumn ( lines . get ( caretPosition . getRow ( ) ) . length ( ) ) ; } invalidate ( ) ; return this ; |
public class JsonMappingOption { public JsonMappingOption acceptAnother ( JsonMappingControlMeta another ) { } } | localDateFormatter = another . getLocalDateFormatter ( ) ; localDateFormattingTrigger = another . getLocalDateFormattingTrigger ( ) ; localDateTimeFormatter = another . getLocalDateTimeFormatter ( ) ; localDateTimeFormattingTrigger = another . getLocalDateTimeFormattingTrigger ( ) ; localTimeFormatter = another . getLocalTimeFormatter ( ) ; localTimeFormattingTrigger = another . getLocalTimeFormattingTrigger ( ) ; zonedDateTimeFormatter = another . getZonedDateTimeFormatter ( ) ; booleanDeserializer = another . getBooleanDeserializer ( ) ; booleanSerializer = another . getBooleanSerializer ( ) ; emptyToNullReading = another . isEmptyToNullReading ( ) ; nullToEmptyWriting = another . isNullToEmptyWriting ( ) ; everywhereQuoteWriting = another . isEverywhereQuoteWriting ( ) ; simpleTextReadingFilter = another . getSimpleTextReadingFilter ( ) ; typeableTextReadingFilter = another . getTypeableTextReadingFilter ( ) ; listNullToEmptyReading = another . isListNullToEmptyReading ( ) ; listNullToEmptyWriting = another . isListNullToEmptyWriting ( ) ; fieldNaming = another . getFieldNaming ( ) ; yourCollections = another . getYourCollections ( ) ; yourScalars = another . getYourScalars ( ) ; yourUltimateCustomizer = another . getYourUltimateCustomizer ( ) ; return this ; |
public class Message { /** * post parse */
void resolveReferences ( Message root ) { } } | final Proto proto = getProto ( ) ; final String fullName = getFullName ( ) ; for ( Field < ? > f : fields . values ( ) ) { f . owner = this ; if ( f . isRepeated ( ) ) { repeatedFieldCount ++ ; root . repeatedFieldPresent = true ; } else { singularFieldCount ++ ; if ( f . isRequired ( ) ) { requiredFieldCount ++ ; root . requiredFieldPresent = true ; } } if ( ! annotationPresentOnFields && ! f . annotations . isEmpty ( ) ) annotationPresentOnFields = true ; if ( f instanceof Field . Bytes ) { if ( f . isRepeated ( ) ) repeatedBytesFieldCount ++ ; else { singularBytesFieldCount ++ ; if ( f . isRequired ( ) ) requiredBytesFieldCount ++ ; } if ( ! root . bytesFieldPresent ) root . bytesFieldPresent = true ; if ( ! root . bytesOrStringDefaultValuePresent && f . defaultValue != null ) root . bytesOrStringDefaultValuePresent = true ; } else if ( f instanceof Field . String ) { if ( f . isRepeated ( ) ) repeatedStringFieldCount ++ ; else { singularStringFieldCount ++ ; if ( f . isRequired ( ) ) requiredStringFieldCount ++ ; } if ( ! root . bytesOrStringDefaultValuePresent && f . defaultValue != null ) root . bytesOrStringDefaultValuePresent = true ; } else if ( f instanceof Field . Reference ) { Field . Reference fr = ( Field . Reference ) f ; String refName = fr . refName ; String packageName = fr . packageName ; String fullRefName = ( packageName == null ? refName : packageName + '.' + refName ) ; HasName refObj = proto . findReference ( fullRefName , fullName ) ; if ( refObj instanceof Message ) { MessageField mf = newMessageField ( ( Message ) refObj , fr , this ) ; fields . put ( mf . name , mf ) ; if ( mf . isRepeated ( ) ) repeatedMessageFieldCount ++ ; else { singularMessageFieldCount ++ ; if ( mf . isRequired ( ) ) requiredMessageFieldCount ++ ; } // references inside options
if ( ! mf . standardOptions . isEmpty ( ) ) proto . references . add ( new ConfiguredReference ( mf . standardOptions , mf . extraOptions , fullName ) ) ; continue ; } if ( refObj instanceof EnumGroup ) { EnumField ef = newEnumField ( ( EnumGroup ) refObj , fr , this ) ; fields . put ( ef . name , ef ) ; if ( ef . isRepeated ( ) ) repeatedEnumFieldCount ++ ; else { singularEnumFieldCount ++ ; if ( ef . isRequired ( ) ) requiredEnumFieldCount ++ ; } // references inside options
if ( ! ef . standardOptions . isEmpty ( ) ) proto . references . add ( new ConfiguredReference ( ef . standardOptions , ef . extraOptions , fullName ) ) ; continue ; } throw err ( "unknown field: " + fullRefName , getProto ( ) ) ; } // references inside options
if ( ! f . standardOptions . isEmpty ( ) ) proto . references . add ( new ConfiguredReference ( f . standardOptions , f . extraOptions , fullName ) ) ; } sortedFields . addAll ( fields . values ( ) ) ; Collections . sort ( sortedFields ) ; for ( Extension extension : this . nestedExtensions ) extension . resolveReferences ( ) ; for ( Service s : nestedServices . values ( ) ) s . resolveReferences ( ) ; for ( Message m : nestedMessages . values ( ) ) m . resolveReferences ( root ) ; |
public class Classfile { /** * Read the class ' interfaces .
* @ throws IOException
* if an I / O exception occurs . */
private void readInterfaces ( ) throws IOException { } } | // Interfaces
final int interfaceCount = inputStreamOrByteBuffer . readUnsignedShort ( ) ; for ( int i = 0 ; i < interfaceCount ; i ++ ) { final String interfaceName = getConstantPoolClassName ( inputStreamOrByteBuffer . readUnsignedShort ( ) ) ; if ( implementedInterfaces == null ) { implementedInterfaces = new ArrayList < > ( ) ; } implementedInterfaces . add ( interfaceName ) ; } |
public class Serializers { /** * Returns the serializer given the index .
* @ param index serializer index
* @ return serializer */
Serializer getSerializer ( int index ) { } } | if ( index >= serializersArray . length ) { throw new IllegalArgumentException ( String . format ( "The serializer can't be found at index %d" , index ) ) ; } return serializersArray [ index ] ; |
public class Discover { /** * Filter the air dates to years that are greater than or equal to this year
* @ param year
* @ return */
public Discover firstAirDateYearGte ( int year ) { } } | if ( checkYear ( year ) ) { params . add ( Param . FIRST_AIR_DATE_GTE , year ) ; } return this ; |
public class DSClientFactory { /** * ( non - Javadoc )
* @ see
* com . impetus . kundera . loader . GenericClientFactory # initialize ( java . util . Map ) */
@ Override public void initialize ( Map < String , Object > externalProperty ) { } } | reader = new CassandraEntityReader ( kunderaMetadata ) ; initializePropertyReader ( ) ; setExternalProperties ( externalProperty ) ; configuration = new CassandraHostConfiguration ( externalProperties , CassandraPropertyReader . csmd , getPersistenceUnit ( ) , kunderaMetadata ) ; // initialize timestamp generator .
initializeTimestampGenerator ( externalProperty ) ; |
public class PartialResponseChangesTypeImpl { /** * If not already created , a new < code > insert < / code > element will be created and returned .
* Otherwise , the first existing < code > insert < / code > element will be returned .
* @ return the instance defined for the element < code > insert < / code > */
public PartialResponseInsertType < PartialResponseChangesType < T > > getOrCreateInsert ( ) { } } | List < Node > nodeList = childNode . get ( "insert" ) ; if ( nodeList != null && nodeList . size ( ) > 0 ) { return new PartialResponseInsertTypeImpl < PartialResponseChangesType < T > > ( this , "insert" , childNode , nodeList . get ( 0 ) ) ; } return createInsert ( ) ; |
public class JoinTypeUtil { /** * Converts { @ link FlinkJoinType } to { @ link JoinRelType } . */
public static JoinRelType toJoinRelType ( FlinkJoinType joinType ) { } } | switch ( joinType ) { case INNER : return JoinRelType . INNER ; case LEFT : return JoinRelType . LEFT ; case RIGHT : return JoinRelType . RIGHT ; case FULL : return JoinRelType . FULL ; default : throw new IllegalArgumentException ( "invalid: " + joinType ) ; } |
public class FeatureSourceRetriever { /** * Set the attributes of a feature .
* @ param feature the feature
* @ param attributes the attributes
* @ throws LayerException oops */
public void setAttributes ( Object feature , Map < String , Attribute > attributes ) throws LayerException { } } | for ( Map . Entry < String , Attribute > entry : attributes . entrySet ( ) ) { String name = entry . getKey ( ) ; if ( ! name . equals ( getGeometryAttributeName ( ) ) ) { asFeature ( feature ) . setAttribute ( name , entry . getValue ( ) ) ; } } |
public class AWSStepFunctionsClient { /** * Describes the state machine associated with a specific execution .
* < note >
* This operation is eventually consistent . The results are best effort and may not reflect very recent updates and
* changes .
* < / note >
* @ param describeStateMachineForExecutionRequest
* @ return Result of the DescribeStateMachineForExecution operation returned by the service .
* @ throws ExecutionDoesNotExistException
* The specified execution does not exist .
* @ throws InvalidArnException
* The provided Amazon Resource Name ( ARN ) is invalid .
* @ sample AWSStepFunctions . DescribeStateMachineForExecution
* @ see < a href = " http : / / docs . aws . amazon . com / goto / WebAPI / states - 2016-11-23 / DescribeStateMachineForExecution "
* target = " _ top " > AWS API Documentation < / a > */
@ Override public DescribeStateMachineForExecutionResult describeStateMachineForExecution ( DescribeStateMachineForExecutionRequest request ) { } } | request = beforeClientExecution ( request ) ; return executeDescribeStateMachineForExecution ( request ) ; |
public class ResourceGroovyMethods { /** * Creates a buffered writer for this file , optionally appending to the
* existing file content .
* @ param file a File
* @ param append true if data should be appended to the file
* @ return a BufferedWriter
* @ throws IOException if an IOException occurs .
* @ since 1.0 */
public static BufferedWriter newWriter ( File file , boolean append ) throws IOException { } } | return new BufferedWriter ( new FileWriter ( file , append ) ) ; |
public class RoundRectangle { /** * Sets the frame and corner dimensions of this rectangle to be equal to those of the supplied
* rectangle . */
public void setRoundRect ( IRoundRectangle rr ) { } } | setRoundRect ( rr . x ( ) , rr . y ( ) , rr . width ( ) , rr . height ( ) , rr . arcWidth ( ) , rr . arcHeight ( ) ) ; |
public class ConcurrentEvictingQueue { /** * Inserts the specified element at the tail of this queue if it is
* possible to do so immediately or if capacity limit is exited
* the oldest element ( the head ) will be evicted , and then the new element added at the tail .
* This method is generally preferable to method { @ link # add } ,
* which can fail to insert an element only by throwing an exception .
* @ throws NullPointerException if the specified element is null */
@ Override public boolean offer ( final E e ) { } } | requireNonNull ( e , ILLEGAL_ELEMENT ) ; Supplier < Boolean > offerElement = ( ) -> { if ( size == 0 ) { ringBuffer [ tailIndex ] = e ; modificationsCount ++ ; size ++ ; } else if ( size == maxSize ) { headIndex = nextIndex ( headIndex ) ; tailIndex = nextIndex ( tailIndex ) ; ringBuffer [ tailIndex ] = e ; modificationsCount ++ ; } else { tailIndex = nextIndex ( tailIndex ) ; ringBuffer [ tailIndex ] = e ; size ++ ; modificationsCount ++ ; } return true ; } ; return writeConcurrently ( offerElement ) ; |
public class AtomDODeserializer { /** * Note : AUDIT datastreams always return false , otherwise defaults to true .
* @ param entry
* @ return */
private boolean getDSVersionable ( DigitalObject obj , Entry entry ) { } } | if ( getDatastreamId ( obj , entry ) . equals ( "AUDIT" ) ) { return false ; } List < Category > versionable = entry . getCategories ( MODEL . VERSIONABLE . uri ) ; if ( versionable . isEmpty ( ) || versionable . size ( ) > 1 ) { return true ; } else { return Boolean . valueOf ( versionable . get ( 0 ) . getTerm ( ) ) ; } |
public class KCVSUtil { /** * Retrieves the value for the specified column and key under the given transaction
* from the store if such exists , otherwise returns NULL
* @ param store Store
* @ param key Key
* @ param column Column
* @ param txh Transaction
* @ return Value for key and column or NULL if such does not exist */
public static StaticBuffer get ( KeyColumnValueStore store , StaticBuffer key , StaticBuffer column , StoreTransaction txh ) throws BackendException { } } | KeySliceQuery query = new KeySliceQuery ( key , column , BufferUtil . nextBiggerBuffer ( column ) ) . setLimit ( 2 ) ; List < Entry > result = store . getSlice ( query , txh ) ; if ( result . size ( ) > 1 ) log . warn ( "GET query returned more than 1 result: store {} | key {} | column {}" , new Object [ ] { store . getName ( ) , key , column } ) ; if ( result . isEmpty ( ) ) return null ; else return result . get ( 0 ) . getValueAs ( StaticBuffer . STATIC_FACTORY ) ; |
public class PriorityQueue { /** * Inserts item x at position k , maintaining heap invariant by promoting x up the tree until it is greater than or equal to its
* parent , or is the root .
* @ param k the position to fill
* @ param x the item to insert */
@ SuppressWarnings ( "unchecked" ) private void siftUp ( int k , E x ) { } } | while ( k > 0 ) { int parent = ( k - 1 ) >>> 1 ; E e = ( E ) queue [ parent ] ; if ( x . compareTo ( e ) >= 0 ) break ; queue [ k ] = e ; k = parent ; } queue [ k ] = x ; |
public class ObjectFactory { /** * Create an instance of { @ link JAXBElement } { @ code < } { @ link AbstractDatumType } { @ code > }
* @ param value
* Java instance representing xml element ' s value .
* @ return
* the new instance of { @ link JAXBElement } { @ code < } { @ link AbstractDatumType } { @ code > } */
@ XmlElementDecl ( namespace = "http://www.opengis.net/gml" , name = "_Datum" , substitutionHeadNamespace = "http://www.opengis.net/gml" , substitutionHeadName = "Definition" ) public JAXBElement < AbstractDatumType > create_Datum ( AbstractDatumType value ) { } } | return new JAXBElement < AbstractDatumType > ( __Datum_QNAME , AbstractDatumType . class , null , value ) ; |
public class JsonResponseWriter { /** * TODO : add custom mapper . . . to override vertx . mapper if desired */
@ Override public void write ( T result , HttpServerRequest request , HttpServerResponse response ) { } } | if ( result != null ) { response . end ( JsonUtils . toJson ( result , io . vertx . core . json . Json . mapper ) ) ; } else { response . end ( ) ; } |
public class Retry { /** * Wrap an { @ link Observable } so that it will retry on all errors . The retry will occur for a maximum number of
* attempts and with a provided { @ link Delay } between each attempt .
* @ param source the { @ link Observable } to wrap .
* @ param maxAttempts the maximum number of times to attempt a retry . It will be capped at < code > { @ link Integer # MAX _ VALUE } - 1 < / code > .
* @ param retryDelay the { @ link Delay } between each attempt .
* @ param < T > the type of items emitted by the source Observable .
* @ return the wrapped retrying Observable . */
public static < T > Observable < T > wrapForRetry ( Observable < T > source , int maxAttempts , Delay retryDelay ) { } } | return wrapForRetry ( source , new RetryWithDelayHandler ( maxAttempts , retryDelay ) ) ; |
public class JSONUtils { /** * Converts the object to a JSON string using the mapper */
public static String toJSON ( Object value , ObjectMapper mapper ) { } } | try { return mapper . writeValueAsString ( value ) ; } catch ( IOException ex ) { throw new FacebookException ( ex ) ; } |
public class NuAbstractCharsetHandler {
    /**
     * Override this to receive decoded Unicode string data read from stderr.
     * Implementations must set the {@link CharBuffer#position() position} of
     * {@code buffer} to indicate how much data they consumed before returning.
     *
     * @param buffer the {@link CharBuffer} holding decoded character data
     */
    protected void onStderrChars(CharBuffer buffer, boolean closed, CoderResult coderResult) {
        // Default behavior: consume everything that was decoded.
        final int end = buffer.limit();
        buffer.position(end);
    }
}
public class FullTextMapperV2 { /** * Map newly associated / defined classifications for the entity with given GUID
* @ param guid Entity guid
* @ param classifications new classifications added to the entity
* @ return Full text string ONLY for the added classifications
* @ throws AtlasBaseException */
public String getIndexTextForClassifications ( String guid , List < AtlasClassification > classifications ) throws AtlasBaseException { } } | String ret = null ; AtlasEntityWithExtInfo entityWithExtInfo = getAndCacheEntity ( guid ) ; if ( entityWithExtInfo != null ) { StringBuilder sb = new StringBuilder ( ) ; if ( CollectionUtils . isNotEmpty ( classifications ) ) { for ( AtlasClassification classification : classifications ) { sb . append ( classification . getTypeName ( ) ) . append ( FULL_TEXT_DELIMITER ) ; mapAttributes ( classification . getAttributes ( ) , entityWithExtInfo , sb , new HashSet < String > ( ) ) ; } } ret = sb . toString ( ) ; } if ( LOG . isDebugEnabled ( ) ) { LOG . debug ( "FullTextMapperV2.map({}): {}" , guid , ret ) ; } return ret ; |
public class Parser { /** * Recursively finds in - clauses and reformats them for the parser */
private String alignINClause ( String in ) { } } | String paramIn = in ; final int indexLowerIn = paramIn . indexOf ( IN_LOWER ) ; final int indexLowerInWithParentheses = paramIn . indexOf ( IN_LOWER_P ) ; final int indexUpperIn = paramIn . indexOf ( IN_UPPER ) ; final int indexUpperInWithParentheses = paramIn . indexOf ( IN_UPPER_P ) ; // find first occurrence of in clause .
final int indexIn = findMinIfNot ( indexUpperInWithParentheses , findMinIfNot ( indexUpperIn , findMinIfNot ( indexLowerIn , indexLowerInWithParentheses , NO_INDEX ) , NO_INDEX ) , NO_INDEX ) ; if ( indexIn > NO_INDEX && ( indexIn == indexLowerInWithParentheses || indexIn == indexUpperInWithParentheses ) ) { // 3 is the size of param in ending with a parentheses .
// add SPLIT _ EXPRESSION
paramIn = paramIn . substring ( 0 , indexIn + 3 ) + SPLIT_EXPRESSION + paramIn . substring ( indexIn + 3 ) ; } String sql = paramIn ; if ( indexIn != NO_INDEX ) { final int indexOpen = paramIn . indexOf ( '(' , indexIn ) ; final int indexClose = paramIn . indexOf ( ')' , indexOpen ) ; String sub = paramIn . substring ( indexOpen , indexClose + 1 ) ; sub = sub . replaceAll ( " " , "" ) ; sql = paramIn . substring ( 0 , indexOpen ) + sub + alignINClause ( paramIn . substring ( indexClose + 1 ) ) ; } return sql ; |
public class NumberInRange { /** * Test if a number is in an arbitrary range .
* @ param number
* a number
* @ param min
* lower boundary of the range
* @ param max
* upper boundary of the range
* @ return true if the given number is within the range */
@ ArgumentsChecked @ Throws ( IllegalNullArgumentException . class ) public static boolean isInRange ( @ Nonnull final Number number , @ Nonnull final BigDecimal min , @ Nonnull final BigDecimal max ) { } } | Check . notNull ( number , "number" ) ; Check . notNull ( min , "min" ) ; Check . notNull ( max , "max" ) ; BigDecimal bigDecimal = null ; if ( number instanceof Byte || number instanceof Short || number instanceof Integer || number instanceof Long ) { bigDecimal = new BigDecimal ( number . longValue ( ) ) ; } else if ( number instanceof Float || number instanceof Double ) { bigDecimal = new BigDecimal ( number . doubleValue ( ) ) ; } else if ( number instanceof BigInteger ) { bigDecimal = new BigDecimal ( ( BigInteger ) number ) ; } else if ( number instanceof BigDecimal ) { bigDecimal = ( BigDecimal ) number ; } else { throw new IllegalNumberArgumentException ( "Return value is no known subclass of 'java.lang.Number': " + number . getClass ( ) . getName ( ) ) ; } return max . compareTo ( bigDecimal ) >= 0 && min . compareTo ( bigDecimal ) <= 0 ; |
public class MurmurHash3Adaptor { /** * Returns a deterministic uniform random integer between zero ( inclusive ) and
* n ( exclusive ) given the input datum .
* @ param datum the given String .
* @ param n The upper exclusive bound of the integers produced . Must be & gt ; 1.
* @ return deterministic uniform random integer */
public static int asInt ( final String datum , final int n ) { } } | if ( ( datum == null ) || datum . isEmpty ( ) ) { throw new SketchesArgumentException ( "Input is null or empty." ) ; } final byte [ ] data = datum . getBytes ( UTF_8 ) ; return asInteger ( toLongArray ( data ) , n ) ; // data is byte [ ] |
public class Util { /** * This returns well known empty values for well - known java types . This returns null for types not
* in the following list .
* < ul >
* < li > { @ code [ Bb ] oolean } < / li >
* < li > { @ code byte [ ] } < / li >
* < li > { @ code Collection } < / li >
* < li > { @ code Iterator } < / li >
* < li > { @ code List } < / li >
* < li > { @ code Map } < / li >
* < li > { @ code Set } < / li >
* < / ul >
* When { @ link Feign . Builder # decode404 ( ) decoding HTTP 404 status } , you ' ll need to teach decoders
* a default empty value for a type . This method cheaply supports typical types by only looking at
* the raw type ( vs type hierarchy ) . Decorate for sophistication . */
public static Object emptyValueOf ( Type type ) { } } | return EMPTIES . getOrDefault ( Types . getRawType ( type ) , ( ) -> null ) . get ( ) ; |
public class ReplacingAttributeAdder { /** * / * ( non - Javadoc )
* @ see org . jasig . services . persondir . support . merger . BaseAdditiveAttributeMerger # mergePersonAttributes ( java . util . Map , java . util . Map ) */
@ Override protected Map < String , List < Object > > mergePersonAttributes ( final Map < String , List < Object > > toModify , final Map < String , List < Object > > toConsider ) { } } | Validate . notNull ( toModify , "toModify cannot be null" ) ; Validate . notNull ( toConsider , "toConsider cannot be null" ) ; toModify . putAll ( toConsider ) ; return toModify ; |
public class ModelGenerator { /** * Instrospects the provided class , creates a model object ( JS code ) and writes it
* into the response . Creates compressed JS code . Method ignores any validation
* annotations .
* @ param request the http servlet request
* @ param response the http servlet response
* @ param clazz class that the generator should introspect
* @ param format specifies which code ( ExtJS or Touch ) the generator should create .
* @ throws IOException
* @ see # writeModel ( HttpServletRequest , HttpServletResponse , Class , OutputFormat ,
* boolean ) */
public static void writeModel ( HttpServletRequest request , HttpServletResponse response , Class < ? > clazz , OutputFormat format ) throws IOException { } } | writeModel ( request , response , clazz , format , IncludeValidation . NONE , false ) ; |
public class CSSMediaRuleImpl { /** * { @ inheritDoc } */
@ Override public void setCssText ( final String cssText ) throws DOMException { } } | try { final CSSOMParser parser = new CSSOMParser ( ) ; final AbstractCSSRuleImpl r = parser . parseRule ( cssText ) ; // The rule must be a media rule
if ( r instanceof CSSMediaRuleImpl ) { mediaList_ = ( ( CSSMediaRuleImpl ) r ) . mediaList_ ; cssRules_ = ( ( CSSMediaRuleImpl ) r ) . cssRules_ ; } else { throw new DOMExceptionImpl ( DOMException . INVALID_MODIFICATION_ERR , DOMExceptionImpl . EXPECTING_MEDIA_RULE ) ; } } catch ( final CSSException e ) { throw new DOMExceptionImpl ( DOMException . SYNTAX_ERR , DOMExceptionImpl . SYNTAX_ERROR , e . getMessage ( ) ) ; } catch ( final IOException e ) { throw new DOMExceptionImpl ( DOMException . SYNTAX_ERR , DOMExceptionImpl . SYNTAX_ERROR , e . getMessage ( ) ) ; } |
public class AtomicDouble { /** * Add { @ code amount } to the value and return the new value . */
public double addAndGet ( double amount ) { } } | long v ; double d ; double n ; long next ; do { v = value . get ( ) ; d = Double . longBitsToDouble ( v ) ; n = d + amount ; next = Double . doubleToLongBits ( n ) ; } while ( ! value . compareAndSet ( v , next ) ) ; return n ; |
public class AsyncAtlasInfo { /** * This method is a sync parse to the JSON stream of atlas information .
* @ return List of atlas information . */
public static List < GVRAtlasInformation > loadAtlasInformation ( InputStream ins ) { } } | try { int size = ins . available ( ) ; byte [ ] buffer = new byte [ size ] ; ins . read ( buffer ) ; return loadAtlasInformation ( new JSONArray ( new String ( buffer , "UTF-8" ) ) ) ; } catch ( JSONException je ) { je . printStackTrace ( ) ; } catch ( IOException ex ) { ex . printStackTrace ( ) ; } return null ; |
public class FindingReplacing { /** * Sets the substring in given left index and right index as the delegate string
* < p > < b > The look result same as { @ link StrMatcher # finder ( ) } ' s behavior < / b >
* @ see StrMatcher # finder ( )
* @ param leftIndex
* @ param rightIndex
* @ return */
public NegateMultiPos < S , Integer , Integer > setBetns ( int leftIndex , int rightIndex ) { } } | return new NegateMultiPos < S , Integer , Integer > ( leftIndex , rightIndex ) { @ Override protected S result ( ) { return delegateQueue ( 'I' , left , right , pos , position , null , plusminus , filltgt ) ; } } ; |
public class Classification { /** * Getter method for the instance variable { @ link # multipleSelect } .
* @ return value of instance variable { @ link # multipleSelect }
* @ throws CacheReloadException on error */
public boolean isMultipleSelect ( ) throws CacheReloadException { } } | final boolean ret ; if ( isRoot ( ) ) { ret = this . multipleSelect ; } else { ret = getParentClassification ( ) . isMultipleSelect ( ) ; } return ret ; |
public class MulitServiceThriftConnecion { /** * 创建原始连接的方法
* @ throws ThriftConnectionPoolException
* 创建连接出现问题时抛出该异常 */
@ SuppressWarnings ( "unchecked" ) private void createConnection ( ) throws ThriftConnectionPoolException { } } | try { transport = new TSocket ( host , port , connectionTimeOut ) ; transport . open ( ) ; TProtocol protocol = createTProtocol ( transport ) ; Iterator < Entry < String , Class < ? extends TServiceClient > > > iterator = thriftClientClasses . entrySet ( ) . iterator ( ) ; while ( iterator . hasNext ( ) ) { Entry < String , Class < ? extends TServiceClient > > entry = iterator . next ( ) ; String serviceName = entry . getKey ( ) ; Class < ? extends TServiceClient > clientClass = entry . getValue ( ) ; TMultiplexedProtocol multiProtocol = new TMultiplexedProtocol ( protocol , serviceName ) ; // 反射实例化客户端对象
Constructor < ? extends TServiceClient > clientConstructor = clientClass . getConstructor ( TProtocol . class ) ; T client = ( T ) clientConstructor . newInstance ( multiProtocol ) ; clients . put ( serviceName , client ) ; if ( logger . isDebugEnabled ( ) ) { logger . debug ( "创建新连接成功:" + host + " 端口:" + port ) ; } } } catch ( Exception e ) { e . printStackTrace ( ) ; throw new ThriftConnectionPoolException ( "无法连接服务器:" + host + " 端口:" + port , e ) ; } |
public class YCbCr2RGBColorConvertor { /** * ( non - Javadoc )
* @ see com . alibaba . simpleimage . codec . util . ColorConvertor # convert ( int [ ] , int ) */
@ Override public long convert ( int [ ] input , int inPos ) { } } | int Y = input [ inPos ++ ] & 0xFF ; int Cb = input [ inPos ++ ] & 0xFF ; int Cr = input [ inPos ] & 0xFF ; byte r = ( byte ) sampleRangeLimitTable [ sampleRangeLimitOffset + Y + Cr2R [ Cr ] ] ; byte g = ( byte ) sampleRangeLimitTable [ sampleRangeLimitOffset + Y + ( ( Cb2G [ Cb ] + Cr2G [ Cr ] ) >> 16 ) ] ; byte b = ( byte ) sampleRangeLimitTable [ sampleRangeLimitOffset + Y + Cb2B [ Cb ] ] ; return ( 0xFF000000L | ( ( r & 0xFF ) << 16 ) | ( ( g & 0xFF ) << 8 ) | ( b & 0xFF ) ) ; |
public class Ifc4PackageImpl { /** * < ! - - begin - user - doc - - >
* < ! - - end - user - doc - - >
* @ generated */
@ Override public EClass getIfcDoorPanelProperties ( ) { } } | if ( ifcDoorPanelPropertiesEClass == null ) { ifcDoorPanelPropertiesEClass = ( EClass ) EPackage . Registry . INSTANCE . getEPackage ( Ifc4Package . eNS_URI ) . getEClassifiers ( ) . get ( 194 ) ; } return ifcDoorPanelPropertiesEClass ; |
public class GenerationalDistance { /** * Returns the generational distance value for a given front
* @ param front The front
* @ param referenceFront The reference pareto front */
public double generationalDistance ( Front front , Front referenceFront ) { } } | double sum = 0.0 ; for ( int i = 0 ; i < front . getNumberOfPoints ( ) ; i ++ ) { sum += Math . pow ( FrontUtils . distanceToClosestPoint ( front . getPoint ( i ) , referenceFront ) , pow ) ; } sum = Math . pow ( sum , 1.0 / pow ) ; return sum / front . getNumberOfPoints ( ) ; |
public class HeapCache { /** * Execute job while making sure that no other operations are going on .
* In case the eviction is connected via a queue we need to stop the queue processing .
* On the other hand we needs to make sure that the queue is drained because this
* method is used to access the recent statistics or check integrity . Draining the queue
* is a two phase job : The draining may not do eviction since we hold the locks , after
* lifting the lock with do eviction and lock again . This ensures that all
* queued entries are processed up to the point when the method was called .
* @ param _ checkClosed variant , this method is needed once without check during the close itself */
private < T > T executeWithGlobalLock ( final Job < T > job , final boolean _checkClosed ) { } } | synchronized ( lock ) { if ( _checkClosed ) { checkClosed ( ) ; } eviction . stop ( ) ; try { T _result = hash . runTotalLocked ( new Job < T > ( ) { @ Override public T call ( ) { if ( _checkClosed ) { checkClosed ( ) ; } boolean f = eviction . drain ( ) ; if ( f ) { return ( T ) RESTART_AFTER_EVICTION ; } return eviction . runLocked ( new Job < T > ( ) { @ Override public T call ( ) { return job . call ( ) ; } } ) ; } } ) ; if ( _result == RESTART_AFTER_EVICTION ) { eviction . evictEventually ( ) ; _result = hash . runTotalLocked ( new Job < T > ( ) { @ Override public T call ( ) { if ( _checkClosed ) { checkClosed ( ) ; } eviction . drain ( ) ; return eviction . runLocked ( new Job < T > ( ) { @ Override public T call ( ) { return job . call ( ) ; } } ) ; } } ) ; } return _result ; } finally { eviction . start ( ) ; } } |
public class Annotations { /** * Attach @ Activation
* @ param annotation The activation annotation
* @ param classLoader The class loader
* @ param configProperties The config properties
* @ param plainConfigProperties The plain config properties
* @ return The updated metadata
* @ exception Exception Thrown if an error occurs */
private ArrayList < MessageListener > attachActivation ( Annotation annotation , ClassLoader classLoader , ArrayList < ConfigProperty > configProperties , ArrayList < ConfigProperty > plainConfigProperties ) throws Exception { } } | ArrayList < ConfigProperty > validProperties = new ArrayList < ConfigProperty > ( ) ; ArrayList < RequiredConfigProperty > requiredConfigProperties = null ; if ( configProperties != null ) { for ( ConfigProperty configProperty : configProperties ) { if ( annotation . getClassName ( ) . equals ( ( ( ConfigPropertyImpl ) configProperty ) . getAttachedClassName ( ) ) ) { validProperties . add ( configProperty ) ; if ( configProperty . isMandatory ( ) ) { if ( requiredConfigProperties == null ) requiredConfigProperties = new ArrayList < RequiredConfigProperty > ( 1 ) ; requiredConfigProperties . add ( new RequiredConfigPropertyImpl ( null , configProperty . getConfigPropertyName ( ) , null ) ) ; } } } } if ( plainConfigProperties != null ) { Set < String > asClasses = getClasses ( annotation . getClassName ( ) , classLoader ) ; for ( ConfigProperty configProperty : plainConfigProperties ) { if ( asClasses . contains ( ( ( ConfigPropertyImpl ) configProperty ) . getAttachedClassName ( ) ) ) { validProperties . add ( configProperty ) ; if ( configProperty . isMandatory ( ) ) { if ( requiredConfigProperties == null ) requiredConfigProperties = new ArrayList < RequiredConfigProperty > ( 1 ) ; requiredConfigProperties . add ( new RequiredConfigPropertyImpl ( null , configProperty . getConfigPropertyName ( ) , null ) ) ; } } } } validProperties . trimToSize ( ) ; Activation activation = ( Activation ) annotation . getAnnotation ( ) ; ArrayList < MessageListener > messageListeners = null ; if ( trace ) log . trace ( "Processing: " + activation ) ; if ( activation . messageListeners ( ) != null ) { messageListeners = new ArrayList < MessageListener > ( activation . messageListeners ( ) . length ) ; for ( Class asClass : activation . 
messageListeners ( ) ) { Activationspec asMeta = new ActivationSpecImpl ( new XsdString ( annotation . getClassName ( ) , null ) , requiredConfigProperties , validProperties , null ) ; MessageListener mlMeta = new MessageListenerImpl ( new XsdString ( asClass . getName ( ) , null ) , asMeta , null ) ; messageListeners . add ( mlMeta ) ; } } return messageListeners ; |
public class PortletAdministrationHelper { /** * Add to the form SUBSCRIBE , BROWSE , and CONFIGURE activity permissions , along with their principals ,
* assigned to the portlet . */
private void addPrincipalPermissionsToForm ( IPortletDefinition def , PortletDefinitionForm form ) { } } | final String portletTargetId = PermissionHelper . permissionTargetIdForPortletDefinition ( def ) ; final Set < JsonEntityBean > principalBeans = new HashSet < > ( ) ; Map < String , IPermissionManager > permManagers = new HashMap < > ( ) ; for ( PortletPermissionsOnForm perm : PortletPermissionsOnForm . values ( ) ) { if ( ! permManagers . containsKey ( perm . getOwner ( ) ) ) { permManagers . put ( perm . getOwner ( ) , authorizationService . newPermissionManager ( perm . getOwner ( ) ) ) ; } final IPermissionManager pm = permManagers . get ( perm . getOwner ( ) ) ; /* Obtain the principals that have permission for the activity on this portlet */
final IAuthorizationPrincipal [ ] principals = pm . getAuthorizedPrincipals ( perm . getActivity ( ) , portletTargetId ) ; for ( IAuthorizationPrincipal principal : principals ) { JsonEntityBean principalBean ; // first assume this is a group
final IEntityGroup group = GroupService . findGroup ( principal . getKey ( ) ) ; if ( group != null ) { // principal is a group
principalBean = new JsonEntityBean ( group , EntityEnum . GROUP ) ; } else { // not a group , so it must be a person
final IGroupMember member = authorizationService . getGroupMember ( principal ) ; principalBean = new JsonEntityBean ( member , EntityEnum . PERSON ) ; // set the name
final String name = groupListHelper . lookupEntityName ( principalBean ) ; principalBean . setName ( name ) ; } principalBeans . add ( principalBean ) ; form . addPermission ( principalBean . getTypeAndIdHash ( ) + "_" + perm . getActivity ( ) ) ; } } form . setPrincipals ( principalBeans , false ) ; |
public class BlockingThreadPoolExecutorService { /** * A thread pool that that blocks clients submitting additional tasks if
* there are already { @ code activeTasks } running threads and { @ code
* waitingTasks } tasks waiting in its queue .
* @ param activeTasks maximum number of active tasks
* @ param waitingTasks maximum number of waiting tasks
* @ param keepAliveTime time until threads are cleaned up in { @ code unit }
* @ param unit time unit
* @ param prefixName prefix of name for threads */
public static BlockingThreadPoolExecutorService newInstance ( int activeTasks , int waitingTasks , long keepAliveTime , TimeUnit unit , String prefixName ) { } } | /* Although we generally only expect up to waitingTasks tasks in the
queue , we need to be able to buffer all tasks in case dequeueing is
slower than enqueueing . */
final BlockingQueue < Runnable > workQueue = new LinkedBlockingQueue < > ( waitingTasks + activeTasks ) ; ThreadPoolExecutor eventProcessingExecutor = new ThreadPoolExecutor ( activeTasks , activeTasks , keepAliveTime , unit , workQueue , newDaemonThreadFactory ( prefixName ) , new RejectedExecutionHandler ( ) { @ Override public void rejectedExecution ( Runnable r , ThreadPoolExecutor executor ) { // This is not expected to happen .
LOG . error ( "Could not submit task to executor {}" , executor . toString ( ) ) ; } } ) ; eventProcessingExecutor . allowCoreThreadTimeOut ( true ) ; return new BlockingThreadPoolExecutorService ( waitingTasks + activeTasks , eventProcessingExecutor ) ; |
public class Hierarchy {
    /**
     * Determines whether the given method name and signature denote one of
     * the {@code Object.wait} monitor operations.
     *
     * @param methodName name of the method
     * @param methodSig  JVM signature of the method
     * @return true if the method is a monitor wait, false if not
     */
    public static boolean isMonitorWait(String methodName, String methodSig) {
        if (!"wait".equals(methodName)) {
            return false;
        }
        // The three overloads: wait(), wait(long), wait(long, int).
        return "()V".equals(methodSig) || "(J)V".equals(methodSig) || "(JI)V".equals(methodSig);
    }
}
public class OsLoginServiceClient { /** * Retrieves an SSH public key .
* < p > Sample code :
* < pre > < code >
* try ( OsLoginServiceClient osLoginServiceClient = OsLoginServiceClient . create ( ) ) {
* FingerprintName name = FingerprintName . of ( " [ USER ] " , " [ FINGERPRINT ] " ) ;
* SshPublicKey response = osLoginServiceClient . getSshPublicKey ( name . toString ( ) ) ;
* < / code > < / pre >
* @ param name The fingerprint of the public key to retrieve . Public keys are identified by their
* SHA - 256 fingerprint . The fingerprint of the public key is in format
* ` users / { user } / sshPublicKeys / { fingerprint } ` .
* @ throws com . google . api . gax . rpc . ApiException if the remote call fails */
public final SshPublicKey getSshPublicKey ( String name ) { } } | GetSshPublicKeyRequest request = GetSshPublicKeyRequest . newBuilder ( ) . setName ( name ) . build ( ) ; return getSshPublicKey ( request ) ; |
public class IdentityHashMapJsonDeserializer { /** * < p > newInstance < / p >
* @ param keyDeserializer { @ link KeyDeserializer } used to deserialize the keys .
* @ param valueDeserializer { @ link JsonDeserializer } used to deserialize the values .
* @ param < K > Type of the keys inside the { @ link IdentityHashMap }
* @ param < V > Type of the values inside the { @ link IdentityHashMap }
* @ return a new instance of { @ link IdentityHashMapJsonDeserializer } */
public static < K , V > IdentityHashMapJsonDeserializer < K , V > newInstance ( KeyDeserializer < K > keyDeserializer , JsonDeserializer < V > valueDeserializer ) { } } | return new IdentityHashMapJsonDeserializer < K , V > ( keyDeserializer , valueDeserializer ) ; |
public class PathFinderImpl { /** * Update the open and closed list to find the path .
* @ param mover The entity that will be moving along the path .
* @ param stx The x coordinate of the start location .
* @ param sty The y coordinate of the start location .
* @ param dtx The x coordinate of the destination location .
* @ param dty The y coordinate of the destination location .
* @ param ignoreRef The ignore map array reference checking ( < code > true < / code > to ignore references ) .
* @ param current The current node .
* @ param maxDepth The last max depth .
* @ return The next max depth . */
private int updateList ( Pathfindable mover , int stx , int sty , int dtx , int dty , boolean ignoreRef , Node current , int maxDepth ) { } } | int depth = maxDepth ; final Tile tile = map . getTile ( current . getX ( ) , current . getY ( ) ) ; final TilePath tilePath = tile . getFeature ( TilePath . class ) ; for ( int y = - 1 ; y < 2 ; y ++ ) { for ( int x = - 1 ; x < 2 ; x ++ ) { if ( ! ( x == 0 && y == 0 ) ) { depth = check ( tilePath , depth , x , y , mover , stx , sty , dtx , dty , ignoreRef , current , maxDepth ) ; } } } return depth ; |
public class WebbUtils { /** * Read an < code > InputStream < / code > into < code > byte [ ] < / code > until EOF .
* < br >
* Does not close the InputStream !
* @ param is the stream to read the bytes from
* @ return all read bytes as an array
* @ throws IOException when read or write operation fails */
public static byte [ ] readBytes ( InputStream is ) throws IOException { } } | if ( is == null ) { return null ; } ByteArrayOutputStream baos = new ByteArrayOutputStream ( ) ; copyStream ( is , baos ) ; return baos . toByteArray ( ) ; |
public class TextToSpeechWebSocketListener { /** * ( non - Javadoc )
* @ see okhttp3 . WebSocketListener # onOpen ( okhttp3 . WebSocket , okhttp3 . Response ) */
@ Override public void onOpen ( final WebSocket webSocket , Response response ) { } } | callback . onConnected ( ) ; this . socket = webSocket ; if ( ! this . socket . send ( buildStartMessage ( this . options ) ) ) { callback . onError ( new IOException ( "WebSocket unavailable" ) ) ; } else { new Thread ( TEXT_TO_WEB_SOCKET ) { @ Override public void run ( ) { sendText ( ) ; if ( socketOpen && ! socket . send ( buildStopMessage ( ) ) ) { LOG . log ( Level . SEVERE , "Stop message discarded because WebSocket is unavailable" ) ; } } } ; } |
public class TextRetinaApiImpl { /** * { @ inheritDoc } */
@ Override public List < String > getTokens ( String text , PosTag [ ] posTags ) throws ApiException { } } | if ( isEmpty ( text ) ) { throw new IllegalArgumentException ( NULL_TEXT_MSG ) ; } LOG . debug ( "Retrieve tokens for the text: " + text ) ; return this . api . getTokensForText ( text , cluePosTags ( posTags ) , retinaName ) ; |
public class BrokerSession {
    /**
     * Request authentication from the server.
     * <p>
     * A no-op returning a success result if the session is already
     * authenticated. Otherwise performs the RGNETBRP AUTH RPC and, on success,
     * stores the post-login message and initializes the session from the
     * second result entry.
     *
     * @param username User name.
     * @param password Password.
     * @param division Login division (may be null).
     * @return Result of authentication.
     */
    public AuthResult authenticate(String username, String password, String division) {
        ensureConnection();
        if (isAuthenticated()) {
            // Already logged in; "0" appears to encode success — TODO confirm.
            return new AuthResult("0");
        }
        String av = username + ";" + password;
        List<String> results = callRPCList("RGNETBRP AUTH:" + Constants.VERSION, null,
            connectionParams.getAppid(), getLocalName(), "", // This is the pre-authentication token
            // av equals ";" only when both credentials are empty; only then is
            // it sent unencrypted — presumably an anonymous/probe login. TODO confirm.
            ";".equals(av) ? av : Security.encrypt(av, serverCaps.getCipherKey()),
            getLocalAddress(), division);
        // results[0] = status, results[1] = init payload, results[2..] = post-login message.
        AuthResult authResult = new AuthResult(results.get(0));
        if (authResult.status.succeeded()) {
            setPostLoginMessage(results.subList(2, results.size()));
            init(results.get(1));
        }
        return authResult;
    }
}
public class SessionDataManager {
    /**
     * Merges incoming property data with changes stored in this log, i.e.:
     * 1. incoming data stays unmodified if there are no corresponding changes;
     * 2. incoming data is refreshed with corresponding changes if any;
     * 3. new data is added from changes;
     * 4. if changed data is marked as "deleted" it is removed from the outgoing list.
     * WARN: THIS METHOD HAS A SIBLING - mergeList, see below.
     *
     * @param rootData parent item whose child properties are merged
     * @param listOnly passed through to the stored-descendant traversal
     * @param dataManager source of the persisted state
     * @return merged list of property data
     * @throws RepositoryException if the persisted state cannot be read
     */
    protected List<? extends ItemData> mergeProps(ItemData rootData, boolean listOnly,
        DataManager dataManager) throws RepositoryException {
        // 1. get all transient descendants
        Collection<ItemState> transientDescendants = changesLog.getLastChildrenStates(rootData, false);
        if (!transientDescendants.isEmpty()) {
            // 2. get ALL persisted descendants, keyed by identifier
            Map<String, ItemData> descendants = new LinkedHashMap<String, ItemData>();
            traverseStoredDescendants(rootData, dataManager, MERGE_PROPS, descendants, listOnly,
                transientDescendants);
            // merge data: transient states overwrite (put) or delete (remove)
            // the persisted entries; LinkedHashMap preserves traversal order.
            for (ItemState state : transientDescendants) {
                ItemData data = state.getData();
                if (!state.isDeleted()) {
                    descendants.put(data.getIdentifier(), data);
                } else {
                    descendants.remove(data.getIdentifier());
                }
            }
            Collection<ItemData> desc = descendants.values();
            return new ArrayList<ItemData>(desc);
        } else {
            // No transient changes: return the persisted child properties as-is.
            return dataManager.getChildPropertiesData((NodeData) rootData);
        }
    }
}
public class ResourceIndexImpl { /** * { @ inheritDoc } */
public int countTriples ( String queryLang , String tupleQuery , String tripleTemplate , int limit , boolean distinct ) throws TrippiException { } } | return _writer . countTriples ( queryLang , tupleQuery , tripleTemplate , limit , distinct ) ; |
public class FullDemo {
    /**
     * Returns a {@link GridBagConstraints} for placing a component into a grid
     * bag layout at the given cell, left-anchored and unstretched.
     *
     * @param gridx column of the component
     * @param gridy row of the component
     * @param gridwidth number of columns the component spans
     * @return constraints with {@code fill=NONE} and {@code anchor=WEST}
     */
    private static GridBagConstraints getConstraints(int gridx, int gridy, int gridwidth) {
        final GridBagConstraints constraints = new GridBagConstraints();
        constraints.gridx = gridx;
        constraints.gridy = gridy;
        constraints.gridwidth = gridwidth;
        constraints.fill = GridBagConstraints.NONE;
        constraints.anchor = GridBagConstraints.WEST;
        return constraints;
    }
}
public class CPDefinitionOptionValueRelPersistenceImpl { /** * Removes all the cp definition option value rels where uuid = & # 63 ; from the database .
* @ param uuid the uuid */
@ Override public void removeByUuid ( String uuid ) { } } | for ( CPDefinitionOptionValueRel cpDefinitionOptionValueRel : findByUuid ( uuid , QueryUtil . ALL_POS , QueryUtil . ALL_POS , null ) ) { remove ( cpDefinitionOptionValueRel ) ; } |
public class TorqueDBHandling {
    /**
     * Creates the db-creation sql script (but does not perform it).
     * <p>
     * Writes the schemata into a temporary "schemas" directory under the work
     * dir, runs the Torque data-model Ant task to generate the creation
     * script, reads it back (compressed) into {@code _creationScript}, and
     * deletes the temporary directory again.
     *
     * @throws PlatformException If some error occurred
     */
    public void createCreationScript() throws PlatformException {
        Project project = new Project();
        TorqueDataModelTask modelTask = new TorqueDataModelTask();
        File tmpDir = null;
        File scriptFile = null;
        _creationScript = null; // reset any previous result before regenerating
        try {
            tmpDir = new File(getWorkDir(), "schemas");
            tmpDir.mkdir();
            // Write the schema files and remember their include pattern for the fileset.
            String includes = writeSchemata(tmpDir);
            scriptFile = new File(tmpDir, CREATION_SCRIPT_NAME);
            project.setBasedir(tmpDir.getAbsolutePath());
            // populating with defaults
            modelTask.setProject(project);
            modelTask.setUseClasspath(true);
            modelTask.setControlTemplate("sql/db-init/Control.vm");
            modelTask.setOutputDirectory(tmpDir);
            modelTask.setOutputFile(CREATION_SCRIPT_NAME);
            modelTask.setTargetDatabase(_targetDatabase);
            FileSet files = new FileSet();
            files.setDir(tmpDir);
            files.setIncludes(includes);
            modelTask.addFileset(files);
            modelTask.execute();
            _creationScript = readTextCompressed(scriptFile);
            deleteDir(tmpDir);
        } catch (Exception ex) {
            // clean-up: remove the temp dir on failure, then wrap the cause
            if ((tmpDir != null) && tmpDir.exists()) {
                deleteDir(tmpDir);
            }
            throw new PlatformException(ex);
        }
    }
}
public class BillingStateCalculator { /** * Package scope for testing */
Invoice earliest ( final SortedSet < Invoice > unpaidInvoices ) { } } | try { return unpaidInvoices . first ( ) ; } catch ( NoSuchElementException e ) { return null ; } |
public class ScrollSpy { /** * Attaches ScrollSpy to specified object with specified target element .
* @ param spyOn Spy on this object
* @ param target Target element having an ID
* @ return ScrollSpy */
public static ScrollSpy scrollSpy ( final UIObject spyOn , final HasId target ) { } } | return new ScrollSpy ( spyOn . getElement ( ) , target ) ; |
public class CmsDefaultPageEditor {
    /**
     * Performs the preview page action in a new browser window.<p>
     *
     * Saves the current editor content to the temporary file first; a failed
     * save shows the error page, but the redirect to the temporary file is
     * attempted regardless.
     *
     * @throws IOException if redirect fails
     * @throws JspException if inclusion of error page fails
     */
    public void actionPreview() throws IOException, JspException {
        try {
            // save content of the editor to the temporary file
            performSaveContent(getParamElementname(), getElementLocale());
        } catch (CmsException e) {
            // show error page
            showErrorPage(this, e);
        }
        // redirect to the temporary file with current active element language
        String param = "?" + org.opencms.i18n.CmsLocaleManager.PARAMETER_LOCALE + "="
            + getParamElementlanguage();
        sendCmsRedirect(getParamTempfile() + param);
    }
}
public class StoreUtils { /** * Implements get by delegating to getAll . */
public static < K , V , T > List < Versioned < V > > get ( Store < K , V , T > storageEngine , K key , T transform ) { } } | Map < K , List < Versioned < V > > > result = storageEngine . getAll ( Collections . singleton ( key ) , Collections . singletonMap ( key , transform ) ) ; if ( result . size ( ) > 0 ) return result . get ( key ) ; else return Collections . emptyList ( ) ; |
public class ErrorReporterImpl { /** * { @ inheritDoc } */
@ Override public String putCustomData ( @ NonNull String key , @ Nullable String value ) { } } | return customData . put ( key , value ) ; |
public class ListAuditTasksRequestMarshaller { /** * Marshall the given parameter object . */
public void marshall ( ListAuditTasksRequest listAuditTasksRequest , ProtocolMarshaller protocolMarshaller ) { } } | if ( listAuditTasksRequest == null ) { throw new SdkClientException ( "Invalid argument passed to marshall(...)" ) ; } try { protocolMarshaller . marshall ( listAuditTasksRequest . getStartTime ( ) , STARTTIME_BINDING ) ; protocolMarshaller . marshall ( listAuditTasksRequest . getEndTime ( ) , ENDTIME_BINDING ) ; protocolMarshaller . marshall ( listAuditTasksRequest . getTaskType ( ) , TASKTYPE_BINDING ) ; protocolMarshaller . marshall ( listAuditTasksRequest . getTaskStatus ( ) , TASKSTATUS_BINDING ) ; protocolMarshaller . marshall ( listAuditTasksRequest . getNextToken ( ) , NEXTTOKEN_BINDING ) ; protocolMarshaller . marshall ( listAuditTasksRequest . getMaxResults ( ) , MAXRESULTS_BINDING ) ; } catch ( Exception e ) { throw new SdkClientException ( "Unable to marshall request to JSON: " + e . getMessage ( ) , e ) ; } |
public class OptimizeParameters { /** * Adjust the provided Parameter objects value created by # findFixedArguments for " rest " value
* and side - effects which might prevent the motion of the parameters from the call sites to the
* function body .
* @ param parameters A list of Parameter objects summarizing all the call - sites and
* whether any of the parameters are fixed .
* @ return Whether there are any movable parameters . */
private static boolean adjustForConstraints ( Node fn , List < Parameter > parameters ) { } } | JSDocInfo info = NodeUtil . getBestJSDocInfo ( fn ) ; if ( info != null && info . isNoInline ( ) ) { return false ; } Node paramList = NodeUtil . getFunctionParameters ( fn ) ; Node lastFormal = paramList . getLastChild ( ) ; int restIndex = Integer . MAX_VALUE ; int lastNonRestFormal = paramList . getChildCount ( ) - 1 ; Node formal = lastFormal ; if ( lastFormal != null && lastFormal . isRest ( ) ) { restIndex = lastNonRestFormal ; lastNonRestFormal -- ; formal = formal . getPrevious ( ) ; } // A parameter with side - effects can move if there are no following parameters
// that can be affected .
// A parameter can be moved if it can ' t be side - effected ( a literal ) ,
// or there are no following side - effects , that aren ' t moved .
boolean anyMovable = false ; boolean seenUnmovableSideEffects = false ; boolean seenUnmoveableSideEffected = false ; boolean allRestValueRemovable = true ; for ( int i = parameters . size ( ) - 1 ; i >= 0 ; i -- ) { Parameter current = parameters . get ( i ) ; // back - off for default values whose default value maybe needed .
// TODO ( johnlenz ) : handle used default
if ( i <= lastNonRestFormal ) { if ( formal . isDefaultValue ( ) && current . mayBeUndefined ) { current . shouldRemove = false ; } formal = formal . getPrevious ( ) ; } // Preserve side - effect ordering , don ' t move this parameter if :
// * the current parameter has side - effects and a following
// parameters that will not be move can be effected .
// * the current parameter can be effected and a following
// parameter that will not be moved has side - effects
if ( current . shouldRemove && ( ( seenUnmovableSideEffects && current . canBeSideEffected ( ) ) || ( seenUnmoveableSideEffected && current . hasSideEffects ( ) ) ) ) { current . shouldRemove = false ; } // If any values that are part of the rest cannot be moved to the function body ,
// then all the rest values must remain at the callsite .
if ( i >= restIndex ) { if ( allRestValueRemovable ) { if ( ! current . shouldRemove ) { anyMovable = false ; allRestValueRemovable = false ; // revisit the trailing params and remark them now that we know they are unremovable .
for ( int j = i + 1 ; j < parameters . size ( ) ; j ++ ) { Parameter p = parameters . get ( 0 ) ; p . shouldRemove = false ; if ( p . canBeSideEffected ) { seenUnmoveableSideEffected = true ; } if ( p . hasSideEffects ) { seenUnmovableSideEffects = true ; } } } } else { current . shouldRemove = false ; } } if ( current . shouldRemove ) { anyMovable = true ; } else { if ( current . canBeSideEffected ) { seenUnmoveableSideEffected = true ; } if ( current . hasSideEffects ) { seenUnmovableSideEffects = true ; } } } return anyMovable ; |
public class SnapshotCommand { /** * { @ inheritDoc } */
@ Override protected void perform ( Wave wave ) throws CommandException { } } | final SnapshotWaveBean wb = waveBean ( wave ) ; WritableImage image = wb . image ( ) ; if ( wb . node ( ) == null ) { final Scene scene = localFacade ( ) . globalFacade ( ) . application ( ) . scene ( ) ; image = scene . snapshot ( image ) ; } else { image = wb . node ( ) . snapshot ( wb . parameters ( ) , image ) ; } wb . image ( image ) ; |
public class PubSubRealization {
    /**
     * Attaches to a created DurableSubscription.
     * Checks that there are no active subscriptions (unless it supports cloning).
     * Checks that the durable subscription exists.
     * <p>
     * Resolves the durable home name to a messaging-engine UUID, then attaches
     * either locally (when the UUID is this engine's) or remotely.
     *
     * @param consumerPoint the consumer point to attach
     * @param subState durable subscription state, including the durable home name
     * @return the attached consumable key
     */
    public ConsumableKey attachToDurableSubscription(LocalConsumerPoint consumerPoint,
        ConsumerDispatcherState subState) throws SIDurableSubscriptionMismatchException,
        SIDurableSubscriptionNotFoundException, SIDestinationLockedException,
        SISelectorSyntaxException, SIDiscriminatorSyntaxException,
        SINotPossibleInCurrentConfigurationException, SIResourceException {
        if (TraceComponent.isAnyTracingEnabled() && tc.isEntryEnabled())
            SibTr.entry(tc, "attachToDurableSubscription", new Object[] { consumerPoint, subState });
        // Extract the UUID for the durable home and see whether this is a local
        // or remote attach
        ConsumableKey result = null;
        SIBUuid8 durableHomeID = _messageProcessor.mapMeNameToUuid(subState.getDurableHome());
        if (durableHomeID == null) {
            if (TraceComponent.isAnyTracingEnabled() && tc.isEntryEnabled())
                SibTr.exit(tc, "attachToDurableSubscription", "SIResourceException");
            // Lookup failed, throw an exception
            throw new SIResourceException(nls.getFormattedMessage(
                "REMOTE_ME_MAPPING_ERROR_CWSIP0156",
                new Object[] { subState.getDurableHome() }, null));
        }
        // Is durableHome local?
        if (durableHomeID.equals(_messageProcessor.getMessagingEngineUuid())) {
            // Directly attached
            result = attachToLocalDurableSubscription(consumerPoint, subState);
        } else {
            // Stash the durableHomeID in the ConsumerDispatcherState
            subState.setRemoteMEUuid(durableHomeID);
            // Remote attach
            result = attachToRemoteDurableSubscription(consumerPoint, subState, durableHomeID);
        }
        if (TraceComponent.isAnyTracingEnabled() && tc.isEntryEnabled())
            SibTr.exit(tc, "attachToDurableSubscription", result);
        return result;
    }
}
public class CreateApplicationVersionResult { /** * A list of values that you must specify before you can deploy certain applications . Some applications might
* include resources that can affect permissions in your AWS account , for example , by creating new AWS Identity and
* Access Management ( IAM ) users . For those applications , you must explicitly acknowledge their capabilities by
* specifying this parameter .
* The only valid values are CAPABILITY _ IAM , CAPABILITY _ NAMED _ IAM , CAPABILITY _ RESOURCE _ POLICY , and
* CAPABILITY _ AUTO _ EXPAND .
* The following resources require you to specify CAPABILITY _ IAM or CAPABILITY _ NAMED _ IAM : < a
* href = " https : / / docs . aws . amazon . com / AWSCloudFormation / latest / UserGuide / aws - properties - iam - group . html "
* > AWS : : IAM : : Group < / a > , < a
* href = " https : / / docs . aws . amazon . com / AWSCloudFormation / latest / UserGuide / aws - resource - iam - instanceprofile . html "
* > AWS : : IAM : : InstanceProfile < / a > , < a
* href = " https : / / docs . aws . amazon . com / AWSCloudFormation / latest / UserGuide / aws - resource - iam - policy . html "
* > AWS : : IAM : : Policy < / a > , and < a
* href = " https : / / docs . aws . amazon . com / AWSCloudFormation / latest / UserGuide / aws - resource - iam - role . html "
* > AWS : : IAM : : Role < / a > . If the application contains IAM resources , you can specify either CAPABILITY _ IAM or
* CAPABILITY _ NAMED _ IAM . If the application contains IAM resources with custom names , you must specify
* CAPABILITY _ NAMED _ IAM .
* The following resources require you to specify CAPABILITY _ RESOURCE _ POLICY : < a
* href = " https : / / docs . aws . amazon . com / AWSCloudFormation / latest / UserGuide / aws - resource - lambda - permission . html "
* > AWS : : Lambda : : Permission < / a > , < a
* href = " https : / / docs . aws . amazon . com / AWSCloudFormation / latest / UserGuide / aws - resource - iam - policy . html "
* > AWS : : IAM : Policy < / a > , < a href =
* " https : / / docs . aws . amazon . com / AWSCloudFormation / latest / UserGuide / aws - resource - applicationautoscaling - scalingpolicy . html "
* > AWS : : ApplicationAutoScaling : : ScalingPolicy < / a > , < a
* href = " https : / / docs . aws . amazon . com / AWSCloudFormation / latest / UserGuide / aws - properties - s3 - policy . html "
* > AWS : : S3 : : BucketPolicy < / a > , < a
* href = " https : / / docs . aws . amazon . com / AWSCloudFormation / latest / UserGuide / aws - properties - sqs - policy . html "
* > AWS : : SQS : : QueuePolicy < / a > , and < a
* href = " https : / / docs . aws . amazon . com / AWSCloudFormation / latest / UserGuide / aws - properties - sns - policy . html "
* > AWS : : SNS : : TopicPolicy < / a > .
* Applications that contain one or more nested applications require you to specify CAPABILITY _ AUTO _ EXPAND .
* If your application template contains any of the above resources , we recommend that you review all permissions
* associated with the application before deploying . If you don ' t specify this parameter for an application that
* requires capabilities , the call will fail .
* @ param requiredCapabilities
* A list of values that you must specify before you can deploy certain applications . Some applications might
* include resources that can affect permissions in your AWS account , for example , by creating new AWS
* Identity and Access Management ( IAM ) users . For those applications , you must explicitly acknowledge their
* capabilities by specifying this parameter . < / p >
* The only valid values are CAPABILITY _ IAM , CAPABILITY _ NAMED _ IAM , CAPABILITY _ RESOURCE _ POLICY , and
* CAPABILITY _ AUTO _ EXPAND .
* The following resources require you to specify CAPABILITY _ IAM or CAPABILITY _ NAMED _ IAM : < a
* href = " https : / / docs . aws . amazon . com / AWSCloudFormation / latest / UserGuide / aws - properties - iam - group . html "
* > AWS : : IAM : : Group < / a > , < a
* href = " https : / / docs . aws . amazon . com / AWSCloudFormation / latest / UserGuide / aws - resource - iam - instanceprofile . html "
* > AWS : : IAM : : InstanceProfile < / a > , < a
* href = " https : / / docs . aws . amazon . com / AWSCloudFormation / latest / UserGuide / aws - resource - iam - policy . html "
* > AWS : : IAM : : Policy < / a > , and < a
* href = " https : / / docs . aws . amazon . com / AWSCloudFormation / latest / UserGuide / aws - resource - iam - role . html "
* > AWS : : IAM : : Role < / a > . If the application contains IAM resources , you can specify either CAPABILITY _ IAM or
* CAPABILITY _ NAMED _ IAM . If the application contains IAM resources with custom names , you must specify
* CAPABILITY _ NAMED _ IAM .
* The following resources require you to specify CAPABILITY _ RESOURCE _ POLICY : < a
* href = " https : / / docs . aws . amazon . com / AWSCloudFormation / latest / UserGuide / aws - resource - lambda - permission . html "
* > AWS : : Lambda : : Permission < / a > , < a
* href = " https : / / docs . aws . amazon . com / AWSCloudFormation / latest / UserGuide / aws - resource - iam - policy . html "
* > AWS : : IAM : Policy < / a > , < a href =
* " https : / / docs . aws . amazon . com / AWSCloudFormation / latest / UserGuide / aws - resource - applicationautoscaling - scalingpolicy . html "
* > AWS : : ApplicationAutoScaling : : ScalingPolicy < / a > , < a
* href = " https : / / docs . aws . amazon . com / AWSCloudFormation / latest / UserGuide / aws - properties - s3 - policy . html "
* > AWS : : S3 : : BucketPolicy < / a > , < a
* href = " https : / / docs . aws . amazon . com / AWSCloudFormation / latest / UserGuide / aws - properties - sqs - policy . html "
* > AWS : : SQS : : QueuePolicy < / a > , and < a
* href = " https : / / docs . aws . amazon . com / AWSCloudFormation / latest / UserGuide / aws - properties - sns - policy . html "
* > AWS : : SNS : : TopicPolicy < / a > .
* Applications that contain one or more nested applications require you to specify CAPABILITY _ AUTO _ EXPAND .
* If your application template contains any of the above resources , we recommend that you review all
* permissions associated with the application before deploying . If you don ' t specify this parameter for an
* application that requires capabilities , the call will fail .
* @ see Capability */
public void setRequiredCapabilities ( java . util . Collection < String > requiredCapabilities ) { } } | if ( requiredCapabilities == null ) { this . requiredCapabilities = null ; return ; } this . requiredCapabilities = new java . util . ArrayList < String > ( requiredCapabilities ) ; |
public class AppendDataPoints {
    /**
     * Parses a column from storage, orders and drops newer duplicate data points.
     * The parsing will return both a Cell collection for debugging and add
     * the cells to concatenated qualifier and value arrays in the compacted data
     * point format so that the results can be merged with other non-append
     * columns or rows.
     * WARNING: If the "tsd.core.repair_appends" config is set to true then this
     * method will issue puts against the database, overwriting the column with
     * sorted and de-duplicated data. It will only do this for rows that are at
     * least an hour old so as to avoid pounding current rows.
     * TODO (CL) - allow for newer or older data points depending on a config.
     *
     * @param tsdb The TSDB to which we belong
     * @param kv The key value to parse
     * @return the parsed cells, sorted by time delta
     * @throws IllegalArgumentException if the given KV is not an append column
     *     or we were unable to parse the value.
     */
    public final Collection<Cell> parseKeyValue(final TSDB tsdb, final KeyValue kv) {
        if (kv.qualifier().length != 3 || kv.qualifier()[0] != APPEND_COLUMN_PREFIX) {
            // it's really not an issue if the offset is not 0, maybe in the future
            // we'll support appends at different offsets.
            throw new IllegalArgumentException("Can not parse cell, it is not "
                + " an appended cell. It has a different qualifier "
                + Bytes.pretty(kv.qualifier()) + ", row key " + Bytes.pretty(kv.key()));
        }
        final boolean repair = tsdb.getConfig().repair_appends();
        final long base_time;
        try {
            base_time = Internal.baseTime(tsdb, kv.key());
        } catch (ArrayIndexOutOfBoundsException oob) {
            throw new IllegalDataException("Corrupted value: invalid row key: " + kv, oob);
        }
        int val_idx = 0;         // read cursor into the raw appended value
        int val_length = 0;      // total bytes of de-duplicated values
        int qual_length = 0;     // total bytes of de-duplicated qualifiers
        int last_delta = -1;     // Time delta, extracted from the qualifier.
        // TreeMap keyed by delta so iteration yields cells in time order.
        final Map<Integer, Internal.Cell> deltas = new TreeMap<Integer, Cell>();
        boolean has_duplicates = false;
        boolean out_of_order = false;
        boolean needs_repair = false;
        try {
            while (val_idx < kv.value().length) {
                byte[] q = Internal.extractQualifier(kv.value(), val_idx);
                System.arraycopy(kv.value(), val_idx, q, 0, q.length);
                val_idx = val_idx + q.length;
                int vlen = Internal.getValueLengthFromQualifier(q, 0);
                byte[] v = new byte[vlen];
                System.arraycopy(kv.value(), val_idx, v, 0, vlen);
                val_idx += vlen;
                int delta = Internal.getOffsetFromQualifier(q);
                final Cell duplicate = deltas.get(delta);
                if (duplicate != null) {
                    // This is a duplicate cell, skip it (the later cell wins;
                    // subtract the displaced cell's sizes from the totals).
                    has_duplicates = true;
                    qual_length -= duplicate.qualifier.length;
                    val_length -= duplicate.value.length;
                }
                qual_length += q.length;
                val_length += vlen;
                final Cell cell = new Cell(q, v);
                deltas.put(delta, cell);
                if (!out_of_order) {
                    // Data points needs to be sorted if we find at least one out of
                    // order data
                    if (delta <= last_delta) {
                        out_of_order = true;
                    }
                    last_delta = delta;
                }
            }
        } catch (ArrayIndexOutOfBoundsException oob) {
            throw new IllegalDataException("Corrupted value: couldn't break down"
                + " into individual values (consumed " + val_idx + " bytes, but was"
                + " expecting to consume " + (kv.value().length) + "): " + kv
                + ", cells so far: " + deltas.values(), oob);
        }
        if (has_duplicates || out_of_order) {
            // Only repair rows older than the threshold to avoid hot rows.
            if ((DateTime.currentTimeMillis() / 1000) - base_time > REPAIR_THRESHOLD) {
                needs_repair = true;
            }
        }
        // Check we consumed all the bytes of the value.
        if (val_idx != kv.value().length) {
            throw new IllegalDataException("Corrupted value: couldn't break down"
                + " into individual values (consumed " + val_idx + " bytes, but was"
                + " expecting to consume " + (kv.value().length) + "): " + kv
                + ", cells so far: " + deltas.values());
        }
        // Second pass: concatenate the sorted, de-duplicated cells into the
        // compacted qualifier/value arrays (and the healed cell, if repairing).
        val_idx = 0;
        int qual_idx = 0;
        byte[] healed_cell = null;
        int healed_index = 0;
        this.value = new byte[val_length];
        this.qualifier = new byte[qual_length];
        if (repair && needs_repair) {
            healed_cell = new byte[val_length + qual_length];
        }
        for (final Cell cell : deltas.values()) {
            System.arraycopy(cell.qualifier, 0, this.qualifier, qual_idx, cell.qualifier.length);
            qual_idx += cell.qualifier.length;
            System.arraycopy(cell.value, 0, this.value, val_idx, cell.value.length);
            val_idx += cell.value.length;
            if (repair && needs_repair) {
                System.arraycopy(cell.qualifier, 0, healed_cell, healed_index, cell.qualifier.length);
                healed_index += cell.qualifier.length;
                System.arraycopy(cell.value, 0, healed_cell, healed_index, cell.value.length);
                healed_index += cell.value.length;
            }
        }
        if (repair && needs_repair) {
            // Overwrite the stored column with the healed (sorted, de-duped) bytes.
            LOG.debug("Repairing appended data column " + kv);
            final PutRequest put = RequestBuilder.buildPutRequest(tsdb.getConfig(),
                tsdb.table, kv.key(), TSDB.FAMILY(), kv.qualifier(), healed_cell,
                kv.timestamp());
            repaired_deferred = tsdb.getClient().put(put);
        }
        return deltas.values();
    }
}
public class DockerRule { /** * Stop and wait till given string will show in container output .
* @ param logSearchString String to wait for in container output .
* @ param waitTime Wait time .
* @ throws TimeoutException On wait timeout . */
public void waitForLogMessage ( final String logSearchString , int waitTime ) throws TimeoutException { } } | WaitForContainer . waitForCondition ( new LogChecker ( this , logSearchString ) , waitTime , describe ( ) ) ; |
public class SpiderParam { /** * Sets the maximum depth the spider can crawl .
* Value { @ value # UNLIMITED _ DEPTH } for unlimited depth .
* @ param maxDepth the new maximum depth .
* @ see # getMaxDepth ( ) */
public void setMaxDepth ( int maxDepth ) { } } | this . maxDepth = maxDepth > UNLIMITED_DEPTH ? maxDepth : UNLIMITED_DEPTH ; getConfig ( ) . setProperty ( SPIDER_MAX_DEPTH , Integer . toString ( this . maxDepth ) ) ; |
public class CrLfDecodingState {
    /**
     * {@inheritDoc}
     *
     * Consumes bytes looking for a line terminator. Accepts either a bare LF
     * or a CRLF pair ({@code found = true}); any other byte after no CR ends
     * the scan with {@code found = false} and is pushed back to the buffer.
     * A CR not followed by LF is a protocol error. The {@code hasCR} field
     * carries a trailing CR across decode calls when the buffer splits the pair.
     */
    public DecodingState decode(IoBuffer in, ProtocolDecoderOutput out) throws Exception {
        boolean found = false;
        boolean finished = false;
        while (in.hasRemaining()) {
            byte b = in.get();
            if (!hasCR) {
                if (b == CR) {
                    hasCR = true; // remember the CR; LF must follow
                } else {
                    if (b == LF) {
                        found = true; // bare LF also terminates the line
                    } else {
                        // Not a terminator: un-read the byte for the next state.
                        in.position(in.position() - 1);
                        found = false;
                    }
                    finished = true;
                    break;
                }
            } else {
                if (b == LF) {
                    found = true;
                    finished = true;
                    break;
                }
                throw new ProtocolDecoderException("Expected LF after CR but was: " + (b & 0xff));
            }
        }
        if (finished) {
            hasCR = false; // reset state for the next line
            return finishDecode(found, out);
        }
        // Ran out of input mid-scan; stay in this state.
        return this;
    }
}
public class ViewPositionAnimator { /** * Updates initial view in case it was changed . You should not call this method if view stays
* the same since animator should automatically detect view position changes .
* @ param from New ' from ' view */
public void update ( @ NonNull View from ) { } } | if ( GestureDebug . isDebugAnimator ( ) ) { Log . d ( TAG , "Updating view" ) ; } updateInternal ( from ) ; |
public class SimpleHTTPClient { /** * Create a RequestBuilder with HTTP method and URL .
* @ param method HTTP method .
* @ param url The request URL .
* @ return A new RequestBuilder that helps you to build an HTTP request . */
public RequestBuilder request ( HttpMethod method , String url ) { } } | return request ( method . asString ( ) , url ) ; |
public class CPMeasurementUnitLocalServiceBaseImpl { /** * Returns the cp measurement unit matching the UUID and group .
* @ param uuid the cp measurement unit ' s UUID
* @ param groupId the primary key of the group
* @ return the matching cp measurement unit , or < code > null < / code > if a matching cp measurement unit could not be found */
@ Override public CPMeasurementUnit fetchCPMeasurementUnitByUuidAndGroupId ( String uuid , long groupId ) { } } | return cpMeasurementUnitPersistence . fetchByUUID_G ( uuid , groupId ) ; |
public class ExceptionSoftener { /** * Soften a CheckedIntFunction that can throw Checked Exceptions to a standard IntFunction that can also throw Checked Exceptions ( without declaring them )
* e . g .
* < pre >
* { @ code
* int loaded = ExceptionSoftener . softenFunction ( this : : load ) . applyHKT ( id ) ;
* public int load ( int it ) throws IOException
* < / pre >
* @ param fn CheckedIntFunction to be converted to a standard IntFunction
* @ return IntFunction that can throw checked Exceptions */
public static < R > IntFunction < R > softenIntFunction ( final CheckedIntFunction < R > fn ) { } } | return t -> { try { return fn . apply ( t ) ; } catch ( final Throwable e ) { throw throwSoftenedException ( e ) ; } } ; |
public class SimpleMatrix { /** * Creates a new SimpleMatrix with the specified DMatrixRMaj used as its internal matrix . This means
* that the reference is saved and calls made to the returned SimpleMatrix will modify the passed in DMatrixRMaj .
* @ param internalMat The internal DMatrixRMaj of the returned SimpleMatrix . Will be modified . */
public static SimpleMatrix wrap ( Matrix internalMat ) { } } | SimpleMatrix ret = new SimpleMatrix ( ) ; ret . setMatrix ( internalMat ) ; return ret ; |
public class ContentExtractor { /** * / * 输入Jsoup的Document , 获取正文所在Element */
public static Element getContentElementByDoc ( Document doc ) throws Exception { } } | ContentExtractor ce = new ContentExtractor ( doc ) ; return ce . getContentElement ( ) ; |
Subsets and Splits

No community queries yet. The top public SQL queries from the community will appear here once they become available.