signature
stringlengths
43
39.1k
implementation
stringlengths
0
450k
public class InconsistentProperty {
  /**
   * Returns the map view of the generated proto map field
   * <code>map&lt;string, .alluxio.grpc.meta.InconsistentPropertyValues&gt; values = 2;</code>.
   *
   * NOTE(review): delegates to the generated internal map-field accessor; the
   * returned map is presumably an unmodifiable view per protobuf conventions --
   * confirm against the generated code before mutating.
   */
  public java.util.Map<java.lang.String, alluxio.grpc.InconsistentPropertyValues> getValuesMap() {
    return internalGetValues().getMap();
  }
}
public class SafeTraceLevelIndexFactory { /** * Create the package index from the contents of the resource . * @ param resourceName The resource name to load the trace list from . */ public static PackageIndex < Integer > createPackageIndex ( String resourceName ) { } }
PackageIndex < Integer > packageIndex = new PackageIndex < Integer > ( ) ; BufferedReader br = null ; try { br = getLibertyTraceListReader ( resourceName ) ; addFiltersAndValuesToIndex ( br , packageIndex ) ; } catch ( IOException e ) { System . err . println ( "Unable to load " + resourceName ) ; } finally { tryToCloseReader ( br ) ; } packageIndex . compact ( ) ; return packageIndex ;
public class NoRegExpMatching { /** * opens and returns the requested filename string * @ throws SftpStatusException */ public String [ ] matchFileNamesWithPattern ( File [ ] files , String fileNameRegExp ) throws SshException , SftpStatusException { } }
String [ ] thefile = new String [ 1 ] ; thefile [ 0 ] = files [ 0 ] . getName ( ) ; return thefile ;
public class ProtoNetworkMerger { /** * Remaps { @ link TableProtoEdge proto edges } for a * { @ link TableStatement statement } . A new statement index is created from * a merge which requires the old { @ link TableProtoEdge proto edges } to be * associated with it . * @ see https : / / github . com / OpenBEL / openbel - framework / issues / 49 * @ param protoNetwork1 { @ link ProtoNetwork } ; merge into * @ param protoNetwork2 { @ link ProtoNetwork } ; merge from * @ param documentId { @ code int } ; bel document id * @ param termMap { @ link Map } of old term id to new proto node id * @ param newStatementIndex { @ code int } new merged statement id * @ param edges { @ link List } ; merging statement ' s * { @ link TableProtoEdge edges } * @ param edgeIndices { @ link Set } ; set of old statement ' s edge indices */ private void remapEdges ( ProtoNetwork protoNetwork1 , ProtoNetwork protoNetwork2 , int documentId , Map < Integer , Integer > termMap , int newStatementIndex , List < TableProtoEdge > edges , Set < Integer > edgeIndices ) { } }
ProtoNodeTable nt = protoNetwork2 . getProtoNodeTable ( ) ; Map < Integer , Integer > nodeTermIndex = nt . getNodeTermIndex ( ) ; TableProtoEdge [ ] remappedEdges = new TableProtoEdge [ edgeIndices . size ( ) ] ; int i = 0 ; for ( Integer edgeIndex : edgeIndices ) { TableProtoEdge edge = edges . get ( edgeIndex ) ; int sourceBefore = edge . getSource ( ) ; int targetBefore = edge . getTarget ( ) ; Integer sourceTerm = nodeTermIndex . get ( sourceBefore ) ; Integer targetTerm = nodeTermIndex . get ( targetBefore ) ; Integer newSource = termMap . get ( sourceTerm ) ; if ( newSource == null ) { newSource = mergeTerm ( sourceTerm , protoNetwork1 , protoNetwork2 , documentId , termMap ) ; } Integer newTarget = termMap . get ( targetTerm ) ; if ( newTarget == null ) { newTarget = mergeTerm ( targetTerm , protoNetwork1 , protoNetwork2 , documentId , termMap ) ; } remappedEdges [ i ++ ] = new TableProtoEdge ( newSource , edge . getRel ( ) , newTarget ) ; } ProtoEdgeTable edgeTable = protoNetwork1 . getProtoEdgeTable ( ) ; edgeTable . addEdges ( newStatementIndex , remappedEdges ) ;
public class UniqueId { /** * Converts a Long to a byte array with the proper UID width * @ param uid The UID to convert * @ param width The width of the UID in bytes * @ return The UID as a byte array * @ throws IllegalStateException if the UID is larger than the width would * allow * @ since 2.1 */ public static byte [ ] longToUID ( final long uid , final short width ) { } }
// Verify that we ' re going to drop bytes that are 0. final byte [ ] padded = Bytes . fromLong ( uid ) ; for ( int i = 0 ; i < padded . length - width ; i ++ ) { if ( padded [ i ] != 0 ) { final String message = "UID " + Long . toString ( uid ) + " was too large for " + width + " bytes" ; LOG . error ( "OMG " + message ) ; throw new IllegalStateException ( message ) ; } } // Shrink the ID on the requested number of bytes . return Arrays . copyOfRange ( padded , padded . length - width , padded . length ) ;
public class AbstractParsedStmt {
    /**
     * Order by Columns or expressions has to operate on the display columns or
     * expressions.
     *
     * @return true when every table scanned by this statement has a unique
     *         index fully covered by the ORDER BY expressions (making the
     *         result order deterministic); false otherwise.
     */
    protected boolean orderByColumnsCoverUniqueKeys() {
        // In theory, if EVERY table in the query has a uniqueness constraint
        // (primary key or other unique index) on columns that are all listed in the
        // ORDER BY values, the result is deterministic.
        // This holds regardless of whether the associated index is actually used in
        // the selected plan, so this check is plan-independent.
        //
        // baseTableAliases associates table aliases with the order by expressions
        // which reference them. Presumably by using table aliases we will map table
        // scans to expressions rather than tables to expressions, and not confuse
        // ourselves with different instances of the same table in self joins.
        HashMap<String, List<AbstractExpression>> baseTableAliases = new HashMap<>();
        for (ParsedColInfo col : orderByColumns()) {
            AbstractExpression expr = col.m_expression;
            // Compute the set of tables mentioned in the expression:
            // 1. Search out all the TVEs.
            // 2. Throw the aliases of the tables of each of these into a HashSet.
            //    The table must have an alias. It might not have a name.
            // 3. If the HashSet has size > 1 we can't use this expression.
            List<TupleValueExpression> baseTVEExpressions = expr.findAllTupleValueSubexpressions();
            Set<String> baseTableNames = new HashSet<>();
            for (TupleValueExpression tve : baseTVEExpressions) {
                String tableAlias = tve.getTableAlias();
                assert (tableAlias != null);
                baseTableNames.add(tableAlias);
            }
            if (baseTableNames.size() != 1) {
                // Table-spanning ORDER BYs -- like ORDER BY A.X + B.Y -- are not helpful.
                // Neither are (nonsense) constant (table-less) expressions.
                continue;
            }
            // Everything in the baseTVEExpressions table is a column in the same
            // table and has the same alias. So just grab the first one.
            // All we really want is the alias.
            AbstractExpression baseTVE = baseTVEExpressions.get(0);
            String nextTableAlias = ((TupleValueExpression) baseTVE).getTableAlias();
            // This was tested above. But the assert above may prove to be over
            // cautious and disappear.
            assert (nextTableAlias != null);
            List<AbstractExpression> perTable = baseTableAliases.get(nextTableAlias);
            if (perTable == null) {
                perTable = new ArrayList<>();
                baseTableAliases.put(nextTableAlias, perTable);
            }
            perTable.add(expr);
        }
        if (m_tableAliasMap.size() > baseTableAliases.size()) {
            // FIXME: There are more table aliases in the select list than tables
            // named in the order by clause. So, some tables named in the select
            // list are not explicitly listed in the order by clause.
            // This would be one of the tricky cases where the goal would be to
            // prove that the row with no ORDER BY component came from the right
            // side of a 1-to-1 or many-to-1 join, like a unique-index nested loop
            // join, etc.
            return false;
        }
        boolean allScansAreDeterministic = true;
        for (Entry<String, List<AbstractExpression>> orderedAlias : baseTableAliases.entrySet()) {
            List<AbstractExpression> orderedAliasExprs = orderedAlias.getValue();
            StmtTableScan tableScan = getStmtTableScanByAlias(orderedAlias.getKey());
            if (tableScan == null) {
                assert (false);
                return false;
            }
            if (tableScan instanceof StmtSubqueryScan) {
                return false; // don't yet handle FROM clause subquery, here.
            }
            Table table = ((StmtTargetTableScan) tableScan).getTargetTable();
            // This table's scans need to be proven deterministic.
            allScansAreDeterministic = false;
            // Search indexes for one that makes the order by deterministic.
            for (Index index : table.getIndexes()) {
                // Skip non-unique indexes.
                if (!index.getUnique()) {
                    continue;
                }
                // Get the list of expressions for the index.
                List<AbstractExpression> indexExpressions = new ArrayList<>();
                String jsonExpr = index.getExpressionsjson();
                // If this is a pure-column index...
                if (jsonExpr.isEmpty()) {
                    for (ColumnRef cref : index.getColumns()) {
                        Column col = cref.getColumn();
                        TupleValueExpression tve = new TupleValueExpression(table.getTypeName(), orderedAlias.getKey(), col.getName(), col.getName(), col.getIndex());
                        indexExpressions.add(tve);
                    }
                }
                // If this is a fancy expression-based index...
                else {
                    try {
                        indexExpressions = AbstractExpression.fromJSONArrayString(jsonExpr, tableScan);
                    } catch (JSONException e) {
                        e.printStackTrace();
                        assert (false);
                        continue;
                    }
                }
                // If the sort covers the index, then it's a unique sort.
                // TODO: The statement's equivalence sets would be handy here to
                // recognize cases like
                //   WHERE B.unique_id = A.b_id ORDER BY A.unique_id, A.b_id
                if (orderedAliasExprs.containsAll(indexExpressions)) {
                    allScansAreDeterministic = true;
                    break;
                }
            }
            // ALL tables' scans need to have proved deterministic.
            if (!allScansAreDeterministic) {
                return false;
            }
        }
        return true;
    }
}
public class ApplicationMetadata { /** * Gets the metamodel . * @ param persistenceUnit * the persistence unit * @ return the metamodel */ public Metamodel getMetamodel ( String persistenceUnit ) { } }
Map < String , Metamodel > model = getMetamodelMap ( ) ; return persistenceUnit != null && model . containsKey ( persistenceUnit ) ? model . get ( persistenceUnit ) : null ;
public class SeleniumActionBuilder { /** * Make screenshot with custom output directory . */ public SeleniumActionBuilder screenshot ( String outputDir ) { } }
MakeScreenshotAction action = new MakeScreenshotAction ( ) ; action . setOutputDir ( outputDir ) ; action ( action ) ; return this ;
public class WebCommonInterceptor { /** * 当拦截器内发生错误时 , 返回json格式的错误信息 * @ param request * 请求request * @ param response * 返回response * @ param message * 错误消息 * @ throws IOException */ protected void returnJsonSystemError ( HttpServletRequest request , HttpServletResponse response , ResultCode resultCode ) throws IOException { } }
Map < String , Object > errors = new HashMap < String , Object > ( ) ; errors . put ( WebResponseConstant . MESSAGE_GLOBAL , resultCode . getMessage ( ) . getMessage ( ) ) ; JsonObject < ? > json = JsonObject . create ( ) ; json . setStatus ( resultCode . getCode ( ) ) ; json . setStatusInfo ( errors ) ; response . setContentType ( "application/json; charset=UTF-8" ) ; response . setCharacterEncoding ( "UTF-8" ) ; response . getWriter ( ) . write ( JsonUtils . toJson ( json ) ) ;
public class CmsDateRestrictionParser { /** * Parses a positive integer . < p > * @ param loc the location of the positive number * @ return the number , or null if it could not be parsed */ private Integer parsePositiveNumber ( CmsXmlContentValueLocation loc ) { } }
if ( loc == null ) { return null ; } try { Integer result = Integer . valueOf ( loc . getValue ( ) . getStringValue ( m_cms ) . trim ( ) ) ; if ( result . intValue ( ) < 0 ) { return null ; } else { return result ; } } catch ( Exception e ) { LOG . info ( e . getLocalizedMessage ( ) , e ) ; return null ; }
public class FilterableTableExample { /** * Sets the state of the clearAllActions button based on the visibility of the filter menus and if the button is * visible sets its disabled state if nothing is filtered . * This is usability sugar , it is not necessary for the functionality of the filters . */ private void setUpClearAllAction ( ) { } }
/* if one or fewer of the filter menus are visible then we don ' t need the clear all menus button */ int visibleMenus = 0 ; if ( firstNameFilterMenu . isVisible ( ) ) { visibleMenus ++ ; } if ( lastNameFilterMenu . isVisible ( ) ) { visibleMenus ++ ; } if ( dobFilterMenu . isVisible ( ) ) { visibleMenus ++ ; } clearAllFiltersButton . setVisible ( visibleMenus > 1 ) ; /* enable / disable the clear all filters action : * if we have not initialised the lists then we do not need the button ( though in this case it will not be visible ) ; * otherwise if the size of the full list is the same as the size of the filtered list then we have not filtered the * list so we do not need to clear the filters and the button can be disabled ; * otherwise enable the button because we have applied at least one filter . */ if ( clearAllFiltersButton . isVisible ( ) ) { List < ? > fullList = getFilterableTableModel ( ) . getFullBeanList ( ) ; List < ? > filteredList = getFilterableTableModel ( ) . getBeanList ( ) ; clearAllFiltersButton . setDisabled ( fullList == null || filteredList == null || fullList . size ( ) == filteredList . size ( ) ) ; }
public class CmsShellCommands { /** * Returns the Locales available on the system ready to use on Method * { @ link # setLocale ( String ) } from the < code > { @ link CmsShell } < / code > . < p > * Note that the full name containing language , country and optional variant seperated * by underscores is returned always but the latter two parts may be left out . < p > */ public void getLocales ( ) { } }
m_shell . getOut ( ) . println ( getMessages ( ) . key ( Messages . GUI_SHELL_LOCALES_AVAILABLE_0 ) ) ; Locale [ ] locales = Locale . getAvailableLocales ( ) ; for ( int i = locales . length - 1 ; i >= 0 ; i -- ) { m_shell . getOut ( ) . println ( " \"" + locales [ i ] . toString ( ) + "\"" ) ; }
public class WTabSet { /** * Retrieves the tab index for the given tab content . * @ param content the tab content * @ return the tab index , or - 1 if the content is not in a tab in this tab set . */ public int getTabIndex ( final WComponent content ) { } }
List < WTab > tabs = getTabs ( ) ; final int count = tabs . size ( ) ; for ( int i = 0 ; i < count ; i ++ ) { WTab tab = tabs . get ( i ) ; if ( content == tab . getContent ( ) ) { return i ; } } return - 1 ;
public class HeadersUtils { /** * { @ link Headers # names ( ) } and convert each element of { @ link Set } to a { @ link String } . * @ param headers the headers to get the names from * @ return a { @ link Set } of header values or an empty { @ link Set } if no values are found . */ public static Set < String > namesAsString ( Headers < CharSequence , CharSequence , ? > headers ) { } }
return new CharSequenceDelegatingStringSet ( headers . names ( ) ) ;
public class DoubleAccessor { /** * To long . * @ param data * the data * @ return the long */ private long toLong ( byte [ ] data ) { } }
if ( data == null || data . length != 8 ) return 0x0 ; return ( long ) ( // ( Below ) convert to longs before shift because digits // are lost with ints beyond the 32 - bit limit ( long ) ( 0xff & data [ 0 ] ) << 56 | ( long ) ( 0xff & data [ 1 ] ) << 48 | ( long ) ( 0xff & data [ 2 ] ) << 40 | ( long ) ( 0xff & data [ 3 ] ) << 32 | ( long ) ( 0xff & data [ 4 ] ) << 24 | ( long ) ( 0xff & data [ 5 ] ) << 16 | ( long ) ( 0xff & data [ 6 ] ) << 8 | ( long ) ( 0xff & data [ 7 ] ) << 0 ) ;
public class AbstractCassandraStorage { /** * convert CfDef to string */ protected static String cfdefToString ( CfDef cfDef ) throws IOException { } }
assert cfDef != null ; // this is so awful it ' s kind of cool ! TSerializer serializer = new TSerializer ( new TBinaryProtocol . Factory ( ) ) ; try { return Hex . bytesToHex ( serializer . serialize ( cfDef ) ) ; } catch ( TException e ) { throw new IOException ( e ) ; }
public class CitrusAnnotations { /** * Inject Citrus framework instance to the test class fields with { @ link CitrusFramework } annotation . * @ param testCase * @ param citrusFramework */ public static final void injectCitrusFramework ( final Object testCase , final Citrus citrusFramework ) { } }
ReflectionUtils . doWithFields ( testCase . getClass ( ) , new ReflectionUtils . FieldCallback ( ) { @ Override public void doWith ( Field field ) throws IllegalArgumentException , IllegalAccessException { log . debug ( String . format ( "Injecting Citrus framework instance on test class field '%s'" , field . getName ( ) ) ) ; ReflectionUtils . setField ( field , testCase , citrusFramework ) ; } } , new ReflectionUtils . FieldFilter ( ) { @ Override public boolean matches ( Field field ) { if ( field . isAnnotationPresent ( CitrusFramework . class ) && Citrus . class . isAssignableFrom ( field . getType ( ) ) ) { if ( ! field . isAccessible ( ) ) { ReflectionUtils . makeAccessible ( field ) ; } return true ; } return false ; } } ) ;
public class ObjectGraphDump { /** * Visits all the keys and entries of the given map . * @ param node the ObjectGraphNode containing the map . */ private void visitMap ( final ObjectGraphNode node ) { } }
Map map = ( Map ) node . getValue ( ) ; for ( Iterator i = map . entrySet ( ) . iterator ( ) ; i . hasNext ( ) ; ) { Map . Entry entry = ( Map . Entry ) i . next ( ) ; Object key = entry . getKey ( ) ; if ( key != null ) { ObjectGraphNode keyNode = new ObjectGraphNode ( ++ nodeCount , "key" , key . getClass ( ) . getName ( ) , key ) ; node . add ( keyNode ) ; visit ( keyNode ) ; } else { ObjectGraphNode keyNode = new ObjectGraphNode ( ++ nodeCount , "key" , Object . class . getName ( ) , null ) ; node . add ( keyNode ) ; } Object value = entry . getValue ( ) ; if ( value != null ) { ObjectGraphNode valueNode = new ObjectGraphNode ( ++ nodeCount , "value" , value . getClass ( ) . getName ( ) , value ) ; node . add ( valueNode ) ; visit ( valueNode ) ; } else { ObjectGraphNode valueNode = new ObjectGraphNode ( ++ nodeCount , "value" , Object . class . getName ( ) , null ) ; node . add ( valueNode ) ; } } adjustOverhead ( node ) ;
public class TransactionState {
    /**
     * Directs the TransactionState to recover its state after a failure, based on
     * the given RecoverableUnit object. If the TransactionState has already been
     * defined or recovered, the operation returns the current state of the
     * transaction. If the state cannot be recovered, the operation returns none.
     * If the RecoverableUnit records information prior to a log record being
     * forced, this may result in recovery of an in-flight transaction. The
     * TransactionState returns active in this case.
     *
     * @param log The RecoverableUnit for the transaction.
     * @return The current state of the transaction.
     */
    public int reconstruct(RecoverableUnit log) throws SystemException {
        if (tc.isEntryEnabled())
            Tr.entry(tc, "reconstruct", new Object[] { this, log });
        int result = STATE_NONE;
        int logState = 0;
        // Lookup the TransactionState section in the recovery log.
        RecoverableUnitSection _logSection = log.lookupSection(TransactionImpl.TRAN_STATE_SECTION);
        if (_logSection != null) {
            try {
                final byte[] logData = _logSection.lastData();
                if (logData.length == 1) {
                    // The state is encoded as a single unsigned byte.
                    logState = logData[0] & 0xff;
                    // Set the state value to be returned from the reconstruct method.
                    // Only states that are legal to recover into are accepted;
                    // anything pre-prepare (NONE/ACTIVE/PREPARING) is invalid here.
                    switch (logState) {
                        case STATE_PREPARED:
                        case STATE_COMMITTING:
                        case STATE_COMMITTED:
                        case STATE_ROLLING_BACK:
                        case STATE_ROLLED_BACK:
                        case STATE_HEURISTIC_ON_COMMIT:
                        case STATE_HEURISTIC_ON_ROLLBACK:
                        case STATE_LAST_PARTICIPANT:
                            result = logState;
                            break;
                        case STATE_NONE:
                        case STATE_ACTIVE:
                        case STATE_PREPARING:
                        default:
                            throw new SystemException("Transaction recovered in invalid state");
                    }
                } else {
                    // If the log record data is invalid, then exit immediately.
                    throw new SystemException("Invalid transaction state record data in log");
                }
            } catch (Throwable e) {
                FFDCFilter.processException(e, "com.ibm.tx.jta.impl.TransactionState.reconstruct", "274", this);
                Tr.fatal(tc, "WTRN0000_ERR_INT_ERROR", new Object[] { "reconstruct", "com.ibm.tx.jta.impl.TransactionState", e });
                if (tc.isEventEnabled())
                    Tr.event(tc, "Unable to access transaction state log record data");
                if (tc.isEntryEnabled())
                    Tr.exit(tc, "reconstruct");
                throw new SystemException(e.toString());
            }
        } else {
            // PK84994 starts here
            // If the state record is not found, then exit immediately with state
            // set to STATE_NONE. Log an FFDC to show this has happened.
            FFDCFilter.processException(new InternalLogException(), "com.ibm.tx.jta.impl.TransactionState.setState", "277", this);
            if (tc.isEventEnabled())
                Tr.event(tc, "No log record data for transaction state - returning NONE");
            _state = STATE_NONE;
            if (tc.isEntryEnabled())
                Tr.exit(tc, "reconstruct", stateToString(_state));
            return _state;
            // PK84994 ends here
        }
        _state = result;
        _tranLog = log;
        // Recover the global identifier.
        final RecoverableUnitSection gtidSection = log.lookupSection(TransactionImpl.GLOBALID_SECTION);
        if (gtidSection != null) {
            try {
                final byte[] logData = gtidSection.lastData();
                if (logData.length > 12) // We must have formatId and lengths encoded
                {
                    final XidImpl xid = new XidImpl(logData, 0);
                    _tran.setXidImpl(xid);
                } else {
                    // If the log record data is invalid, then exit immediately.
                    throw new SystemException("Invalid transaction global identifier record data in log");
                }
            } catch (Throwable e) {
                FFDCFilter.processException(e, "com.ibm.tx.jta.impl.TransactionState.reconstruct", "334", this);
                Tr.fatal(tc, "WTRN0000_ERR_INT_ERROR", new Object[] { "reconstruct", "com.ibm.tx.jta.impl.TransactionState", e });
                if (tc.isEventEnabled())
                    Tr.event(tc, "Unable to access global transaction id log record data");
                if (tc.isEntryEnabled())
                    Tr.exit(tc, "reconstruct");
                throw new SystemException(e.toString());
            }
        } else {
            // PK84994 starts here
            // If the global transaction id record is not found, then exit
            // immediately with state set to STATE_NONE. Log an FFDC to show
            // this has happened.
            FFDCFilter.processException(new InternalLogException(), "com.ibm.tx.jta.impl.TransactionState.setState", "336", this);
            if (tc.isEventEnabled())
                Tr.event(tc, "No log record data for global transaction id - returning NONE");
            _state = STATE_NONE;
            if (tc.isEntryEnabled())
                Tr.exit(tc, "reconstruct", stateToString(_state));
            return _state;
            // PK84994 ends here
        }
        if (tc.isEntryEnabled())
            Tr.exit(tc, "reconstruct", stateToString(result));
        return result;
    }
}
public class Files { /** * Deletes a path from the filesystem * If the path is a directory its contents * will be recursively deleted before it itself * is deleted . * Note that removal of a directory is not an atomic - operation * and so if an error occurs during removal , some of the directories * descendants may have already been removed * @ throws IOException if an error occurs whilst removing a file or directory */ public static void forceDelete ( final Path path ) throws IOException { } }
if ( ! java . nio . file . Files . isDirectory ( path ) ) { java . nio . file . Files . delete ( path ) ; } else { java . nio . file . Files . walkFileTree ( path , DeleteDirVisitor . getInstance ( ) ) ; }
public class Model { /** * Gets attribute value as < code > Double < / code > . * If there is a { @ link Converter } registered for the attribute that converts from Class < code > S < / code > to Class * < code > java . lang . Double < / code > , given the attribute value is an instance of < code > S < / code > , then it will be used , * otherwise performs a conversion using { @ link Convert # toDouble ( Object ) } . * @ param attributeName name of attribute to convert * @ return value converted to < code > Double < / code > */ public Double getDouble ( String attributeName ) { } }
Object value = getRaw ( attributeName ) ; Converter < Object , Double > converter = modelRegistryLocal . converterForValue ( attributeName , value , Double . class ) ; return converter != null ? converter . convert ( value ) : Convert . toDouble ( value ) ;
public class Streams { /** * Perform a flatMap operation where the result will be a flattened stream of Strings * from the text loaded from the supplied URLs * < pre > * { @ code * List < String > result = Streams . liftAndBindURL ( Stream . of ( " input . file " ) * , getClass ( ) . getClassLoader ( ) : : getResource ) * . collect ( CyclopsCollectors . toList ( ) ; * assertThat ( result , equalTo ( Arrays . asList ( " hello " , " world " ) ) ) ; * < / pre > * @ param fn * @ return */ public final static < T > Stream < String > flatMapURL ( final Stream < T > stream , final Function < ? super T , URL > fn ) { } }
return stream . flatMap ( fn . andThen ( url -> ExceptionSoftener . softenSupplier ( ( ) -> { final BufferedReader in = new BufferedReader ( new InputStreamReader ( url . openStream ( ) ) ) ; return in . lines ( ) ; } ) . get ( ) ) ) ;
public class SQSConnection {
    /**
     * This method is not supported.
     *
     * @throws JMSException always, to signal that connection consumers are not
     *         implemented by this client
     */
    @Override
    public ConnectionConsumer createConnectionConsumer(Queue queue, String messageSelector, ServerSessionPool sessionPool, int maxMessages) throws JMSException {
        // Unconditionally reject: this JMS provider has no connection-consumer support.
        throw new JMSException(SQSMessagingClientConstants.UNSUPPORTED_METHOD);
    }
}
public class IndianCalendar { /** * / * [ deutsch ] * < p > Erzeugt ein neues indisches Kalenderdatum . < / p > * @ param iyear Indian year in the range 1-9999921 * @ param imonth Indian month in range 1-12 * @ param idom Indian day of month in range 1-31 * @ return new instance of { @ code IndianCalendar } * @ throws IllegalArgumentException in case of any inconsistencies */ public static IndianCalendar of ( int iyear , int imonth , int idom ) { } }
if ( ! CALSYS . isValid ( IndianEra . SAKA , iyear , imonth , idom ) ) { throw new IllegalArgumentException ( "Invalid Indian date: year=" + iyear + ", month=" + imonth + ", day=" + idom ) ; } return new IndianCalendar ( iyear , imonth , idom ) ;
public class TypeSystem { /** * DO NOT USE OR DELETE . Called form the debugging process ( IDE ) . * @ param filePaths */ public static void refreshedFiles ( String [ ] filePaths ) { } }
for ( String filePath : filePaths ) { IFile file = CommonServices . getFileSystem ( ) . getIFile ( new File ( filePath ) ) ; if ( file != null ) { TypeSystem . refreshed ( file ) ; } }
public class ConnectionReadCompletedCallback {
    /**
     * Called by the next channel in the chain when an outstanding read request
     * has completed successfully.
     *
     * @param vc the virtual network connection the read completed on
     * @param rctx the read request context holding the received buffer
     * @see TCPReadCompletedCallback#complete(VirtualConnection, TCPReadRequestContext)
     */
    public void complete(NetworkConnection vc, IOReadRequestContext rctx) {
        if (TraceComponent.isAnyTracingEnabled() && tc.isEntryEnabled())
            SibTr.entry(this, tc, "complete", new Object[] { vc, rctx });
        if (thisConnection.isLoggingIOEvents())
            thisConnection.getConnectionEventRecorder().logDebug("complete method invoked on read context " + System.identityHashCode(rctx));
        // First update the invocation count. If we are being called back on the
        // same thread as last time, increment the counter. Otherwise, start
        // counting from 1 again.
        synchronized (invocationCountLock) {
            if (lastInvokedOnThread == Thread.currentThread()) {
                invocationCount++;
            } else {
                invocationCount = 1;
                lastInvokedOnThread = Thread.currentThread();
            }
        }
        try {
            synchronized (this) {
                boolean done = false;
                do {
                    done = true;
                    WsByteBuffer contextBuffer = rctx.getBuffer();
                    contextBuffer.flip();
                    // Notify PMI that read has completed.
                    if (conversation.getConversationType() == Conversation.CLIENT) {
                        xmitParser.setType(Conversation.CLIENT);
                    } else if (conversation.getConversationType() == Conversation.ME) {
                        xmitParser.setType(Conversation.ME);
                    }
                    if (TraceComponent.isAnyTracingEnabled() && tc.isEntryEnabled())
                        JFapUtils.debugTraceWsByteBuffer(this, tc, contextBuffer, 16, "data received");
                    xmitParser.parse(contextBuffer);
                    if ((rctx != null) && (!receivePhysicalCloseRequest)) {
                        // Calculate the amount of time before the request times
                        // out. This is the mechanism by which we implement
                        // heartbeat intervals and time outs.
                        int timeout;
                        if (awaitingHeartbeatResponse.isSet()) {
                            // We can only reach this point in the code if we have made a
                            // heartbeat request but this callback was driven because of a
                            // non-heartbeat response. In previous versions of the code we were
                            // careful to calculate the remaining time and make another read
                            // request. This caused timeouts ala defect 363463. So, now the code
                            // is more generous and resets its timer to the full heartbeat
                            // timeout value every time any non-heartbeat transmission is
                            // received from our peer. This isn't unreasonable as the fact we
                            // are receiving data indicates that the peer is probably still
                            // healthy.
                            timeout = currentHeartbeatTimeout * 1000;
                            if (timeout < 1)
                                timeout = 1;
                        } else {
                            // We are not awaiting a heartbeat response, use the
                            // heartbeat interval as our timeout.
                            timeout = thisConnection.getHeartbeatInterval() * 1000;
                        }
                        if (timeout > 0) {
                            if (TraceComponent.isAnyTracingEnabled() && tc.isDebugEnabled())
                                SibTr.debug(this, tc, "setting heartbeat timeout to: " + timeout + " milliseconds");
                        } else {
                            if (TraceComponent.isAnyTracingEnabled() && tc.isDebugEnabled())
                                SibTr.debug(this, tc, "not using a heartbeat timeout");
                            timeout = IOReadRequestContext.NO_TIMEOUT;
                        }
                        boolean closing = false;
                        synchronized (connectionClosingLock) {
                            closing = connectionClosing;
                        }
                        // If the connection is closing/closed our buffers will have been
                        // released, so make sure we don't use them again.
                        if (!closing) {
                            // Crude way to ensure we end up with the right sized read buffer.
                            if (isFirstCompleteInvocation) {
                                int readBufferSize = Integer.parseInt(RuntimeInfo.getProperty("com.ibm.ws.sib.jfapchannel.DEFAULT_READ_BUFFER_SIZE", "" + JFapChannelConstants.DEFAULT_READ_BUFFER_SIZE));
                                if (!contextBuffer.isDirect() || (contextBuffer.capacity() < readBufferSize)) {
                                    // Make sure we release the other buffer so we don't leak memory.
                                    contextBuffer.release();
                                    contextBuffer = WsByteBufferPool.getInstance().allocateDirect(readBufferSize);
                                    rctx.setBuffer(contextBuffer);
                                }
                                isFirstCompleteInvocation = false;
                            }
                            contextBuffer.clear();
                            // Decide whether to explicitly request a thread switch. We'll do
                            // this if we have been recursively called more than
                            // MAX_INVOCATIONS_BEFORE_THREAD_SWITCH times.
                            boolean forceQueue = false;
                            synchronized (invocationCountLock) {
                                if (invocationCount > MAX_INVOCATIONS_BEFORE_THREAD_SWITCH) {
                                    forceQueue = true;
                                    lastInvokedOnThread = null;
                                }
                            }
                            if (thisConnection.isLoggingIOEvents())
                                thisConnection.getConnectionEventRecorder().logDebug("invoking readCtx.read() on context " + System.identityHashCode(rctx) + " with a timeout of " + timeout);
                            // A null return means the read completed inline; loop
                            // again rather than recursing.
                            done = (rctx.read(1, this, forceQueue, timeout) == null);
                        }
                    }
                } while (!done);
            }
        } catch (Error error) {
            FFDCFilter.processException(error, "com.ibm.ws.sib.jfapchannel.impl.ConnectionReadCompletedCallback", JFapChannelConstants.CONNREADCOMPCALLBACK_COMPLETE_01, thisConnection.getDiagnostics(true));
            if (TraceComponent.isAnyTracingEnabled() && tc.isEventEnabled())
                SibTr.exception(this, tc, error);
            // It might appear slightly odd for this code to catch Error (especially
            // since the JDK docs say that Error means that something has gone so
            // badly wrong that you should abandon all hope). This code makes one
            // final stab at putting out some diagnostics about what happened (if we
            // propagate the Error up to the TCP Channel, it is sometimes lost) and
            // closing down the connection. I figured that we might as well try to do
            // something - as we can hardly make things worse... (famous last words)
            thisConnection.invalidate(false, error, "Error caught in ConnectionReadCompletedCallback.complete()");
            // Re-throw the error to ensure that it causes the maximum devastation.
            // The JVM is probably very ill if an Error is thrown so attempt no recovery.
            throw error;
        } catch (RuntimeException runtimeException) {
            FFDCFilter.processException(runtimeException, "com.ibm.ws.sib.jfapchannel.impl.ConnectionReadCompletedCallback", JFapChannelConstants.CONNREADCOMPCALLBACK_COMPLETE_02, thisConnection.getDiagnostics(true));
            if (TraceComponent.isAnyTracingEnabled() && tc.isEventEnabled())
                SibTr.exception(this, tc, runtimeException);
            // We can reasonably try to recover from a runtime exception by
            // invalidating the associated connection. This should drive the
            // underlying TCP/IP socket to be closed.
            thisConnection.invalidate(false, runtimeException, "RuntimeException caught in ConnectionReadCompletedCallback.complete");
            // Don't throw the RuntimeException on as we risk blowing away part of
            // the TCP channel.
        }
        if (TraceComponent.isAnyTracingEnabled() && tc.isEntryEnabled())
            SibTr.exit(this, tc, "complete");
    }
}
public class RGroupQuery {

    /**
     * Recursive function to produce valid configurations
     * for {@link #getAllConfigurations()}.
     *
     * At the recursion base case (level == number of R-group numbers) a concrete
     * configuration has been chosen: the root is cloned, each selected substitute
     * is cloned and wired onto the root at the recorded attachment points, and any
     * leftover R# pseudo atoms/bonds are stripped before the clone is added to
     * {@code result}. Otherwise the method iterates the possible occurrences for
     * the current level, expands them into positional distributions, maps each
     * distribution to concrete substitutes and recurses at {@code level + 1}.
     *
     * @param rGroupNumbers  R-group numbers, one per recursion level
     * @param occurrences    candidate occurrence counts per level
     * @param occurIndexes   per-level index of the occurrence currently chosen (mutated in place)
     * @param distributions  per-level positional distribution currently chosen (mutated in place)
     * @param substitutes    per-level substitute lists currently chosen (mutated in place)
     * @param level          current recursion depth into {@code rGroupNumbers}
     * @param result         accumulator for the fully substituted molecules
     * @throws CDKException if cloning the root or a substitute group fails
     */
    private void findConfigurationsRecursively(List<Integer> rGroupNumbers, List<List<Integer>> occurrences,
            List<Integer> occurIndexes, List<Integer[]> distributions, List<List<RGroup>> substitutes, int level,
            List<IAtomContainer> result) throws CDKException {
        if (level == rGroupNumbers.size()) {
            // Base case: a complete configuration is picked. Skip it unless the
            // IF..THEN R-group conditions hold for this distribution.
            if (!checkIfThenConditionsMet(rGroupNumbers, distributions)) return;
            // Clone the root to get a scaffold to plug the substitutes into.
            IAtomContainer root = this.getRootStructure();
            IAtomContainer rootClone = null;
            try {
                rootClone = (IAtomContainer) root.clone();
            } catch (CloneNotSupportedException e) {
                // Abort with CDK exception
                throw new CDKException("clone() failed; could not perform R-group substitution.");
            }
            for (int rgpIdx = 0; rgpIdx < rGroupNumbers.size(); rgpIdx++) {
                int rNum = rGroupNumbers.get(rgpIdx);
                int pos = 0;
                List<RGroup> mapped = substitutes.get(rgpIdx);
                for (RGroup substitute : mapped) {
                    // The R# query atom this substitute (or null placeholder) maps onto.
                    IAtom rAtom = this.getRgroupQueryAtoms(rNum).get(pos);
                    if (substitute != null) {
                        IAtomContainer rgrpClone = null;
                        try {
                            rgrpClone = (IAtomContainer) (substitute.getGroup().clone());
                        } catch (CloneNotSupportedException e) {
                            throw new CDKException("clone() failed; could not perform R-group substitution.");
                        }
                        // root cloned, substitute cloned. These now need to be attached to each other..
                        rootClone.add(rgrpClone);
                        Map<Integer, IBond> rAttachmentPoints = this.getRootAttachmentPoints().get(rAtom);
                        if (rAttachmentPoints != null) {
                            // Loop over attachment points of the R# atom
                            // (keys are 1-based: apo + 1).
                            for (int apo = 0; apo < rAttachmentPoints.size(); apo++) {
                                IBond bond = rAttachmentPoints.get(apo + 1);
                                // Check how R# is attached to bond
                                int whichAtomInBond = 0;
                                if (bond.getEnd().equals(rAtom)) whichAtomInBond = 1;
                                IAtom subsAt = null;
                                if (apo == 0)
                                    subsAt = substitute.getFirstAttachmentPoint();
                                else
                                    subsAt = substitute.getSecondAttachmentPoint();
                                // Do substitution with the clones
                                IBond cloneBond = rootClone.getBond(getBondPosition(bond, root));
                                if (subsAt != null) {
                                    IAtom subsCloneAtom = rgrpClone.getAtom(
                                            getAtomPosition(subsAt, substitute.getGroup()));
                                    cloneBond.setAtom(subsCloneAtom, whichAtomInBond);
                                }
                            }
                        }
                        // Optional: shift substitutes 2D for easier visual checking
                        if (rAtom.getPoint2d() != null && substitute != null
                                && substitute.getFirstAttachmentPoint() != null
                                && substitute.getFirstAttachmentPoint().getPoint2d() != null) {
                            Point2d pointR = rAtom.getPoint2d();
                            Point2d pointC = substitute.getFirstAttachmentPoint().getPoint2d();
                            double xDiff = pointC.x - pointR.x;
                            double yDiff = pointC.y - pointR.y;
                            // Translate the whole substitute so its attachment point
                            // lands on the R# atom's former 2D position.
                            for (IAtom subAt : rgrpClone.atoms()) {
                                if (subAt.getPoint2d() != null) {
                                    subAt.getPoint2d().x -= xDiff;
                                    subAt.getPoint2d().y -= yDiff;
                                }
                            }
                        }
                    } else {
                        // Distribution flag is 0, this means the R# group will not be substituted.
                        // Any atom connected to this group should be given the defined RestH value.
                        IAtom discarded = rootClone.getAtom(getAtomPosition(rAtom, root));
                        for (IBond r0Bond : rootClone.bonds()) {
                            if (r0Bond.contains(discarded)) {
                                for (IAtom atInBond : r0Bond.atoms()) {
                                    atInBond.setProperty(CDKConstants.REST_H,
                                            this.getRGroupDefinitions().get(rNum).isRestH());
                                }
                            }
                        }
                    }
                    pos++;
                }
            }
            // Remove R# remnants from the clone, bonds and atoms that may linger.
            // Restart the scan after each removal because removing invalidates the iterator.
            boolean confHasRGroupBonds = true;
            while (confHasRGroupBonds) {
                for (IBond cloneBond : rootClone.bonds()) {
                    boolean removeBond = false;
                    if (cloneBond.getBegin() instanceof IPseudoAtom
                            && isValidRgroupQueryLabel(((IPseudoAtom) cloneBond.getBegin()).getLabel()))
                        removeBond = true;
                    else if (cloneBond.getEnd() instanceof IPseudoAtom
                            && isValidRgroupQueryLabel(((IPseudoAtom) cloneBond.getEnd()).getLabel()))
                        removeBond = true;
                    if (removeBond) {
                        rootClone.removeBond(cloneBond);
                        confHasRGroupBonds = true;
                        break;
                    }
                    confHasRGroupBonds = false;
                }
            }
            boolean confHasRGroupAtoms = true;
            while (confHasRGroupAtoms) {
                for (IAtom cloneAt : rootClone.atoms()) {
                    if (cloneAt instanceof IPseudoAtom)
                        if (isValidRgroupQueryLabel(((IPseudoAtom) cloneAt).getLabel())) {
                            rootClone.removeAtomOnly(cloneAt);
                            confHasRGroupAtoms = true;
                            break;
                        }
                    confHasRGroupAtoms = false;
                }
            }
            // Add to result list
            result.add(rootClone);
        } else {
            for (int idx = 0; idx < occurrences.get(level).size(); idx++) {
                occurIndexes.set(level, idx);
                // With an occurrence picked 0..n for this level's R-group, now find
                // all possible distributions (positional alternatives).
                int occurrence = occurrences.get(level).get(idx);
                int positions = this.getRgroupQueryAtoms(rGroupNumbers.get(level)).size();
                Integer[] candidate = new Integer[positions];
                for (int j = 0; j < candidate.length; j++) {
                    candidate[j] = 0;
                }
                List<Integer[]> rgrpDistributions = new ArrayList<Integer[]>();
                findDistributions(occurrence, candidate, rgrpDistributions, 0);
                for (Integer[] distribution : rgrpDistributions) {
                    distributions.set(level, distribution);
                    RGroup[] mapping = new RGroup[distribution.length];
                    List<List<RGroup>> mappedSubstitutes = new ArrayList<List<RGroup>>();
                    mapSubstitutes(this.getRGroupDefinitions().get(rGroupNumbers.get(level)), 0, distribution,
                            mapping, mappedSubstitutes);
                    for (List<RGroup> mappings : mappedSubstitutes) {
                        substitutes.set(level, mappings);
                        // Recurse into the next R-group level with the current choice fixed.
                        findConfigurationsRecursively(rGroupNumbers, occurrences, occurIndexes, distributions,
                                substitutes, level + 1, result);
                    }
                }
            }
        }
    }
}
public class EnglishTreebankParserParams {

    /**
     * Set language-specific options according to flags.
     * This routine should process the option starting in args[i] (which
     * might potentially be several arguments long if it takes arguments).
     * It should return the index after the last index it consumed in
     * processing.  In particular, if it cannot process the current option,
     * the return value should be i.
     *
     * NOTE(review): several int-valued flags (-splitIN, -splitNNP,
     * -makePPTOintoIN, -splitSTag) read args[i + 1] WITHOUT the
     * "i + 1 &lt; args.length" bounds check that -splitCC, -splitAux etc.
     * perform; a trailing flag with a missing argument will throw
     * ArrayIndexOutOfBoundsException. Confirm before relying on lenient parsing.
     */
    @Override
    public int setOptionFlag(String[] args, int i) {
        // [CDM 2008: there are no generic options!] first, see if it's a generic option
        // int j = super.setOptionFlag(args, i);
        // if (i != j) return j;
        // lang. specific options
        if (args[i].equalsIgnoreCase("-splitIN")) { englishTrain.splitIN = Integer.parseInt(args[i + 1]); i += 2; }
        else if (args[i].equalsIgnoreCase("-splitPercent")) { englishTrain.splitPercent = true; i += 1; }
        else if (args[i].equalsIgnoreCase("-splitQuotes")) { englishTrain.splitQuotes = true; i += 1; }
        else if (args[i].equalsIgnoreCase("-splitSFP")) { englishTrain.splitSFP = true; i += 1; }
        else if (args[i].equalsIgnoreCase("-splitNNP")) { englishTrain.splitNNP = Integer.parseInt(args[i + 1]); i += 2; }
        else if (args[i].equalsIgnoreCase("-rbGPA")) { englishTrain.tagRBGPA = true; i += 1; }
        else if (args[i].equalsIgnoreCase("-splitTRJJ")) { englishTrain.splitTRJJ = true; i += 1; }
        else if (args[i].equalsIgnoreCase("-splitJJCOMP")) { englishTrain.splitJJCOMP = true; i += 1; }
        else if (args[i].equalsIgnoreCase("-splitMoreLess")) { englishTrain.splitMoreLess = true; i += 1; }
        else if (args[i].equalsIgnoreCase("-unaryDT")) { englishTrain.unaryDT = true; i += 1; }
        else if (args[i].equalsIgnoreCase("-unaryRB")) { englishTrain.unaryRB = true; i += 1; }
        else if (args[i].equalsIgnoreCase("-unaryIN")) { englishTrain.unaryIN = true; i += 1; }
        else if (args[i].equalsIgnoreCase("-markReflexivePRP")) { englishTrain.markReflexivePRP = true; i += 1; }
        else if (args[i].equalsIgnoreCase("-splitCC") && i + 1 < args.length) { englishTrain.splitCC = Integer.parseInt(args[i + 1]); i += 2; }
        else if (args[i].equalsIgnoreCase("-splitRB")) { englishTrain.splitRB = true; i += 1; }
        else if (args[i].equalsIgnoreCase("-splitAux") && i + 1 < args.length) { englishTrain.splitAux = Integer.parseInt(args[i + 1]); i += 2; }
        else if (args[i].equalsIgnoreCase("-splitSbar") && i + 1 < args.length) { englishTrain.splitSbar = Integer.parseInt(args[i + 1]); i += 2; }
        else if (args[i].equalsIgnoreCase("-splitVP") && i + 1 < args.length) { englishTrain.splitVP = Integer.parseInt(args[i + 1]); i += 2; }
        else if (args[i].equalsIgnoreCase("-splitVPNPAgr")) { englishTrain.splitVPNPAgr = true; i += 1; }
        else if (args[i].equalsIgnoreCase("-gpaRootVP")) { englishTrain.gpaRootVP = true; i += 1; }
        else if (args[i].equalsIgnoreCase("-makePPTOintoIN")) { englishTrain.makePPTOintoIN = Integer.parseInt(args[i + 1]); i += 2; }
        else if (args[i].equalsIgnoreCase("-splitSTag")) { englishTrain.splitSTag = Integer.parseInt(args[i + 1]); i += 2; }
        else if (args[i].equalsIgnoreCase("-splitSGapped") && (i + 1 < args.length)) { englishTrain.splitSGapped = Integer.parseInt(args[i + 1]); i += 2; }
        else if (args[i].equalsIgnoreCase("-splitNPpercent") && (i + 1 < args.length)) { englishTrain.splitNPpercent = Integer.parseInt(args[i + 1]); i += 2; }
        else if (args[i].equalsIgnoreCase("-splitNPPRP")) { englishTrain.splitNPPRP = true; i += 1; }
        else if (args[i].equalsIgnoreCase("-dominatesV") && (i + 1 < args.length)) { englishTrain.dominatesV = Integer.parseInt(args[i + 1]); i += 2; }
        else if (args[i].equalsIgnoreCase("-dominatesI")) { englishTrain.dominatesI = true; i += 1; }
        else if (args[i].equalsIgnoreCase("-dominatesC")) { englishTrain.dominatesC = true; i += 1; }
        else if (args[i].equalsIgnoreCase("-splitNPNNP") && (i + 1 < args.length)) { englishTrain.splitNPNNP = Integer.parseInt(args[i + 1]); i += 2; }
        else if (args[i].equalsIgnoreCase("-splitTMP") && (i + 1 < args.length)) { englishTrain.splitTMP = Integer.parseInt(args[i + 1]); i += 2; }
        else if (args[i].equalsIgnoreCase("-splitNPADV") && (i + 1 < args.length)) { englishTrain.splitNPADV = Integer.parseInt(args[i + 1]); i += 2; }
        else if (args[i].equalsIgnoreCase("-markContainedVP")) { englishTrain.markContainedVP = true; i += 1; }
        else if (args[i].equalsIgnoreCase("-markDitransV") && (i + 1 < args.length)) { englishTrain.markDitransV = Integer.parseInt(args[i + 1]); i += 2; }
        else if (args[i].equalsIgnoreCase("-splitPoss") && (i + 1 < args.length)) { englishTrain.splitPoss = Integer.parseInt(args[i + 1]); i += 2; }
        else if (args[i].equalsIgnoreCase("-baseNP") && (i + 1 < args.length)) { englishTrain.splitBaseNP = Integer.parseInt(args[i + 1]); i += 2; }
        else if (args[i].equalsIgnoreCase("-joinNounTags")) { englishTrain.joinNounTags = true; i += 1; }
        else if (args[i].equalsIgnoreCase("-correctTags")) { englishTrain.correctTags = true; i += 1; }
        else if (args[i].equalsIgnoreCase("-noCorrectTags")) { englishTrain.correctTags = false; i += 1; }
        else if (args[i].equalsIgnoreCase("-markCC") && (i + 1 < args.length)) { englishTrain.markCC = Integer.parseInt(args[i + 1]); i += 2; }
        else if (args[i].equalsIgnoreCase("-noAnnotations")) {
            // Turn off the main annotation features in one shot.
            englishTrain.splitVP = 0;
            englishTrain.splitTMP = NPTmpRetainingTreeNormalizer.TEMPORAL_NONE;
            englishTrain.splitSGapped = 0;
            i += 1;
        }
        else if (args[i].equalsIgnoreCase("-retainNPTMPSubcategories")) { englishTest.retainNPTMPSubcategories = true; i += 1; }
        else if (args[i].equalsIgnoreCase("-retainTMPSubcategories")) { englishTest.retainTMPSubcategories = true; i += 1; }
        else if (args[i].equalsIgnoreCase("-retainADVSubcategories")) { englishTest.retainADVSubcategories = true; i += 1; }
        else if (args[i].equalsIgnoreCase("-leaveItAll") && (i + 1 < args.length)) { englishTrain.leaveItAll = Integer.parseInt(args[i + 1]); i += 2; }
        else if (args[i].equalsIgnoreCase("-headFinder") && (i + 1 < args.length)) {
            // At present need to do this manually at train _and_ test time;
            // it's not serialized
            try {
                headFinder = (HeadFinder) Class.forName(args[i + 1]).newInstance();
            } catch (Exception e) {
                // Fall back to the default head finder on any reflective failure.
                System.err.println(e);
                System.err.println("Warning: Default HeadFinder will be used.");
            }
            i += 2;
        }
        else if (args[i].equalsIgnoreCase("-makeCopulaHead")) { englishTest.makeCopulaHead = true; i += 1; }
        else if (args[i].equalsIgnoreCase("-acl03pcfg")) {
            // Preset: feature combination from the ACL'03 PCFG paper.
            englishTrain.splitIN = 3;
            englishTrain.splitPercent = true;
            englishTrain.splitPoss = 1;
            englishTrain.splitCC = 2;
            englishTrain.unaryDT = true;
            englishTrain.unaryRB = true;
            englishTrain.splitAux = 1;
            englishTrain.splitVP = 2;
            englishTrain.splitSGapped = 3;
            englishTrain.dominatesV = 1;
            englishTrain.splitTMP = NPTmpRetainingTreeNormalizer.TEMPORAL_ACL03PCFG;
            englishTrain.splitBaseNP = 1;
            i += 1;
        }
        else if (args[i].equalsIgnoreCase("-jenny")) {
            // Preset: currently identical to -acl03pcfg.
            englishTrain.splitIN = 3;
            englishTrain.splitPercent = true;
            englishTrain.splitPoss = 1;
            englishTrain.splitCC = 2;
            englishTrain.unaryDT = true;
            englishTrain.unaryRB = true;
            englishTrain.splitAux = 1;
            englishTrain.splitVP = 2;
            englishTrain.splitSGapped = 3;
            englishTrain.dominatesV = 1;
            englishTrain.splitTMP = NPTmpRetainingTreeNormalizer.TEMPORAL_ACL03PCFG;
            englishTrain.splitBaseNP = 1;
            i += 1;
        }
        else if (args[i].equalsIgnoreCase("-linguisticPCFG")) {
            englishTrain.splitIN = 3;
            englishTrain.splitPercent = true;
            englishTrain.splitPoss = 1;
            englishTrain.splitCC = 2;
            englishTrain.unaryDT = true;
            englishTrain.unaryRB = true;
            englishTrain.splitAux = 2;
            englishTrain.splitVP = 3;
            englishTrain.splitSGapped = 4;
            englishTrain.dominatesV = 0; // not for linguistic
            englishTrain.splitTMP = NPTmpRetainingTreeNormalizer.TEMPORAL_ACL03PCFG;
            englishTrain.splitBaseNP = 1;
            englishTrain.splitMoreLess = true;
            englishTrain.correctTags = true; // different from acl03pcfg
            i += 1;
        }
        else if (args[i].equalsIgnoreCase("-goodPCFG")) {
            englishTrain.splitIN = 4; // different from acl03pcfg
            englishTrain.splitPercent = true;
            englishTrain.splitNPpercent = 0; // no longer different from acl03pcfg
            englishTrain.splitPoss = 1;
            englishTrain.splitCC = 1;
            englishTrain.unaryDT = true;
            englishTrain.unaryRB = true;
            englishTrain.splitAux = 2; // different from acl03pcfg
            englishTrain.splitVP = 3; // different from acl03pcfg
            englishTrain.splitSGapped = 4;
            englishTrain.dominatesV = 1;
            englishTrain.splitTMP = NPTmpRetainingTreeNormalizer.TEMPORAL_ACL03PCFG;
            englishTrain.splitNPADV = 1; // different from acl03pcfg
            englishTrain.splitBaseNP = 1;
            // englishTrain.splitMoreLess = true; // different from acl03pcfg
            englishTrain.correctTags = true; // different from acl03pcfg
            englishTrain.markDitransV = 2; // different from acl03pcfg
            i += 1;
        }
        else if (args[i].equalsIgnoreCase("-ijcai03")) {
            englishTrain.splitIN = 3;
            englishTrain.splitPercent = true;
            englishTrain.splitPoss = 1;
            englishTrain.splitCC = 2;
            englishTrain.unaryDT = false;
            englishTrain.unaryRB = false;
            englishTrain.splitAux = 0;
            englishTrain.splitVP = 2;
            englishTrain.splitSGapped = 4;
            englishTrain.dominatesV = 0;
            englishTrain.splitTMP = NPTmpRetainingTreeNormalizer.TEMPORAL_ACL03PCFG;
            englishTrain.splitBaseNP = 1;
            i += 1;
        }
        else if (args[i].equalsIgnoreCase("-goodFactored")) {
            englishTrain.splitIN = 3;
            englishTrain.splitPercent = true;
            englishTrain.splitPoss = 1;
            englishTrain.splitCC = 2;
            englishTrain.unaryDT = false;
            englishTrain.unaryRB = false;
            englishTrain.splitAux = 0;
            englishTrain.splitVP = 3; // different from ijcai03
            englishTrain.splitSGapped = 4;
            englishTrain.dominatesV = 0;
            englishTrain.splitTMP = NPTmpRetainingTreeNormalizer.TEMPORAL_ACL03PCFG;
            englishTrain.splitBaseNP = 1;
            // BAD!! englishTrain.markCC = 1; // different from ijcai03
            englishTrain.correctTags = true; // different from ijcai03
            i += 1;
        }
        // Unrecognized option: return i unchanged per the method contract.
        return i;
    }
}
public class FunctionRewriter { /** * Parse helper code needed by a reducer . * @ return Helper code root . If parse fails , return null . */ public Node parseHelperCode ( Reducer reducer ) { } }
Node root = compiler . parseSyntheticCode ( reducer . getClass ( ) + ":helper" , reducer . getHelperSource ( ) ) ; return ( root != null ) ? root . removeFirstChild ( ) : null ;
public class XMLSerializer {

    /**
     * End document: closes any elements still open, marks the serializer
     * finished, and flushes the underlying writer.
     *
     * @throws IOException Signals that an I/O exception has occurred.
     */
    public void endDocument() throws IOException {
        // close all unclosed tags.
        // NOTE(review): this loop terminates only if endTag() decrements 'depth'
        // as a side effect — confirm against endTag's implementation.
        while (depth > 0) {
            endTag(elNamespace[depth], elName[depth]);
        }
        // assert depth == 0;
        // assert startTagIncomplete == false;
        // Mark the serializer terminal state in one combined assignment.
        finished = pastRoot = startTagIncomplete = true;
        out.flush();
    }
}
public class ParallelInference {

    /**
     * This method gracefully shuts down ParallelInference instance:
     * each non-null worker in {@code zoo} is interrupted, shut down and
     * released, then the array itself is dropped.
     * Safe to call repeatedly — returns immediately once {@code zoo} is null.
     */
    public synchronized void shutdown() {
        // Already shut down (or never started): nothing to do.
        if (zoo == null) return;
        for (int e = 0; e < zoo.length; e++) {
            if (zoo[e] == null) continue;
            // Interrupt first so a blocked worker wakes up, then shut it down.
            zoo[e].interrupt();
            zoo[e].shutdown();
            zoo[e] = null; // release the reference eagerly
        }
        zoo = null;
        // NOTE(review): explicit System.gc() is usually discouraged; presumably
        // intended to reclaim off-heap/native resources promptly — confirm.
        System.gc();
    }
}
public class DiagnosticsInner {

    /**
     * Get Site Analyses (one page of results, for paging continuation).
     *
     * @param nextPageLink The NextLink from the previous successful call to List operation.
     * @throws IllegalArgumentException thrown if parameters fail the validation
     * @return the PagedList&lt;AnalysisDefinitionInner&gt; object wrapped in {@link ServiceResponse} if successful.
     */
    public Observable<ServiceResponse<Page<AnalysisDefinitionInner>>> listSiteAnalysesSlotNextSinglePageAsync(final String nextPageLink) {
        if (nextPageLink == null) {
            throw new IllegalArgumentException("Parameter nextPageLink is required and cannot be null.");
        }
        // Auto-generated client pattern: the next-page URL is used verbatim.
        String nextUrl = String.format("%s", nextPageLink);
        return service.listSiteAnalysesSlotNext(nextUrl, this.client.acceptLanguage(), this.client.userAgent())
            .flatMap(new Func1<Response<ResponseBody>, Observable<ServiceResponse<Page<AnalysisDefinitionInner>>>>() {
                @Override
                public Observable<ServiceResponse<Page<AnalysisDefinitionInner>>> call(Response<ResponseBody> response) {
                    try {
                        // Deserialize the raw response, then re-wrap the page
                        // under the public Page interface.
                        ServiceResponse<PageImpl<AnalysisDefinitionInner>> result = listSiteAnalysesSlotNextDelegate(response);
                        return Observable.just(new ServiceResponse<Page<AnalysisDefinitionInner>>(result.body(), result.response()));
                    } catch (Throwable t) {
                        // Surface any failure through the Rx error channel.
                        return Observable.error(t);
                    }
                }
            });
    }
}
public class BasicHttpClient { /** * Override this method in case you want to make the Charset response differently for your project . * Otherwise the framework falls back to this implementation by default which means - If the Charset * is not set by the server framework will default to Charset . defaultCharset ( ) , otherwise it will * use the Charset sent by the server e . g . UAT - 8 or UTF - 16 or UTF - 32 etc . * Note - * See implementation of java . nio . charset . Charset # defaultCharset . Here the default is UTF - 8 if the * defaultCharset is not set by the JVM , otherwise it picks the JVM provided defaultCharset * @ param httpResponse * @ return : A http response compatible with Charset received from the http server e . g . UTF - 8 , UTF - 16 etc * @ throws IOException * @ author santhoshkumar santhoshTpixler */ public Response createCharsetResponse ( CloseableHttpResponse httpResponse ) throws IOException { } }
HttpEntity entity = httpResponse . getEntity ( ) ; Charset charset = ContentType . getOrDefault ( httpResponse . getEntity ( ) ) . getCharset ( ) ; charset = ( charset == null ) ? Charset . defaultCharset ( ) : charset ; return Response . status ( httpResponse . getStatusLine ( ) . getStatusCode ( ) ) . entity ( entity != null ? IOUtils . toString ( entity . getContent ( ) , charset ) : null ) . build ( ) ;
public class PropertiesManager {

    /**
     * Save the current value of the given property to the file without
     * modifying the values of any other properties in the file. In other words,
     * {@link #isModified(Object)} will return <code>false</code> for the given
     * property after this call completes, but it will return <code>true</code>
     * for any other properties that have been modified since the last load or
     * save.<br>
     * <br>
     * This method will not block to wait for the property to be saved. See
     * {@link #saveProperty(Object)} for a blocking version.
     *
     * @param property the property to save
     * @return a task representing this save request
     * @throws IOException if there is an error while attempting to write the
     *         property to the file
     */
    public Future<Void> savePropertyNB(final T property) {
        // Wrap the save in a Callable so it can run on the executor; listeners
        // are notified only after the write succeeds.
        Callable<Void> task = new Callable<Void>() {
            @Override
            public Void call() throws Exception {
                properties.save(getFile(), comment, isSavingDefaults(),
                        getTranslator().getPropertyName(property));
                firePropertySaved(property);
                return null;
            }
        };
        return executor.submit(task);
    }
}
public class NonBlockingBufferedWriter {

    /**
     * Writes a portion of a String.
     *
     * If the value of the <tt>len</tt> parameter is negative then no characters
     * are written. This is contrary to the specification of this method in the
     * {@linkplain java.io.Writer#write(java.lang.String, int, int) superclass},
     * which requires that an {@link IndexOutOfBoundsException} be thrown.
     *
     * @param s String to be written
     * @param off Offset from which to start reading characters
     * @param len Number of characters to be written
     * @exception IOException If an I/O error occurs
     */
    @Override
    @SuppressFBWarnings ("IL_INFINITE_LOOP")
    public void write(final String s, final int off, final int len) throws IOException {
        _ensureOpen();
        int b = off;               // next source index to copy from
        final int t = off + len;   // exclusive end of the requested range
        while (b < t) {
            // Copy as much as fits in the remaining buffer space.
            final int d = Math.min(m_nChars - m_nNextChar, t - b);
            s.getChars(b, b + d, m_aBuf, m_nNextChar);
            b += d;
            m_nNextChar += d;
            // Buffer full: flush before copying the next chunk.
            if (m_nNextChar >= m_nChars) flushBuffer();
        }
    }
}
public class CmsADESessionCache { /** * Gets the cached template bean for a given container page uri . < p > * @ param uri the container page uri * @ param safe if true , return a valid template bean even if it hasn ' t been cached before * @ return the template bean */ public TemplateBean getTemplateBean ( String uri , boolean safe ) { } }
TemplateBean templateBean = m_templateBeanCache . get ( uri ) ; if ( ( templateBean != null ) || ! safe ) { return templateBean ; } return new TemplateBean ( "" , "" ) ;
public class ElasticSearchHelper {

    /**
     * Look up (and lazily create/cache) the ElasticSearch sink object for the
     * given name.
     *
     * @param elasticSearch the sink name; null/empty returns the default
     *        {@code elasticSearchSink} field
     * @return the cached or newly created sink, or null if the container knows
     *         no bean by that name
     */
    public static ElasticSearch getElasticSearchSink(String elasticSearch) {
        init();
        // Null/empty name: fall back to the class-level default sink.
        if (elasticSearch == null || elasticSearch.equals("")) {
            return elasticSearchSink;
        }
        // NOTE: this local deliberately shadows the static default field above.
        ElasticSearch elasticSearchSink = elasticSearchMap.get(elasticSearch);
        if (elasticSearchSink == null) {
            // Double-checked locking: re-read under the lock before creating.
            synchronized (elasticSearchMap) {
                elasticSearchSink = elasticSearchMap.get(elasticSearch);
                if (elasticSearchSink != null) return elasticSearchSink;
                context = DefaultApplicationContext.getApplicationContext("conf/elasticsearch.xml");
                elasticSearchSink = context.getTBeanObject(elasticSearch, ElasticSearch.class);
                if (elasticSearchSink != null) {
                    elasticSearchMap.put(elasticSearch, elasticSearchSink);
                }
            }
        }
        return elasticSearchSink;
    }
}
public class Array {

    /**
     * Returns the length of the specified array object, as an {@code int}.
     * Supports object arrays and all eight primitive array types.
     *
     * @param array the array
     * @return the length of the array
     * @exception IllegalArgumentException if the object argument is not an array
     */
    public static int getLength(Object array) {
        // The visible chain only handled Object[] and boolean[]; the remaining
        // primitive array types and the documented IllegalArgumentException
        // were missing, so e.g. getLength(new int[3]) fell off the end.
        if (array instanceof Object[]) {
            return ((Object[]) array).length;
        } else if (array instanceof boolean[]) {
            return ((boolean[]) array).length;
        } else if (array instanceof byte[]) {
            return ((byte[]) array).length;
        } else if (array instanceof char[]) {
            return ((char[]) array).length;
        } else if (array instanceof short[]) {
            return ((short[]) array).length;
        } else if (array instanceof int[]) {
            return ((int[]) array).length;
        } else if (array instanceof long[]) {
            return ((long[]) array).length;
        } else if (array instanceof float[]) {
            return ((float[]) array).length;
        } else if (array instanceof double[]) {
            return ((double[]) array).length;
        }
        // Not an array (covers any other object; matches the documented contract).
        throw new IllegalArgumentException("Argument is not an array");
    }
}
public class SightResourcesImpl { /** * Get a specified Sight . * It mirrors to the following Smartsheet REST API method : GET / sights / { sightId } * @ param sightId the Id of the Sight * @ param level compatibility level * @ return the Sight resource . * @ throws IllegalArgumentException if any argument is null or empty string * @ throws InvalidRequestException if there is any problem with the REST API request * @ throws AuthorizationException if there is any problem with the REST API authorization ( access token ) * @ throws ResourceNotFoundException if the resource cannot be found * @ throws ServiceUnavailableException if the REST API service is not available ( possibly due to rate limiting ) * @ throws SmartsheetException if there is any other error during the operation */ public Sight getSight ( long sightId , Integer level ) throws SmartsheetException { } }
String path = "sights/" + sightId ; HashMap < String , Object > parameters = new HashMap < String , Object > ( ) ; if ( level != null ) { parameters . put ( "level" , level ) ; } path += QueryUtil . generateUrl ( null , parameters ) ; return this . getResource ( path , Sight . class ) ;
public class ComponentFactory {

    /**
     * Factory method for creating a new {@link Label} with the for attribute.
     * Resolves the localized model from the resource bundle key and delegates
     * to the model-based overload.
     *
     * @param id the id
     * @param forId the for id
     * @param resourceBundleKey the resource key
     * @param component the component to find resource keys
     * @return the new {@link Label}.
     */
    public static Label newLabel(final String id, final String forId,
            final ResourceBundleKey resourceBundleKey, final Component component) {
        // Build the i18n model first, then delegate label construction.
        return ComponentFactory.newLabel(id, forId,
                ResourceModelFactory.newResourceModel(resourceBundleKey, component));
    }
}
public class Rollbar {

    /**
     * Record a debugging message with extra information attached.
     * Delegates to the throwable-taking overload with no throwable.
     *
     * @param message the message.
     * @param custom the extra information.
     */
    public void debug(String message, Map<String, Object> custom) {
        // No throwable is associated with a plain debug message.
        debug(null, custom, message);
    }
}
public class ArrayUtils {

    /**
     * Concatenates the specified arrays into a single new array.
     * The inputs are not modified; the result has the runtime component type
     * of {@code first}.
     *
     * @param <T> the type of array element
     * @param first the specified first array (must not be null)
     * @param rest the specified rest arrays (each must not be null)
     * @return a newly allocated concatenated array
     */
    @SafeVarargs // the varargs array is only read, never written to or exposed
    public static <T> T[] concatenate(final T[] first, final T[]... rest) {
        // Pre-compute the total size so the result is allocated exactly once.
        int totalLength = first.length;
        for (final T[] array : rest) {
            totalLength += array.length;
        }
        // copyOf both allocates the result and copies 'first' in one step.
        final T[] ret = Arrays.copyOf(first, totalLength);
        int offset = first.length;
        for (final T[] array : rest) {
            System.arraycopy(array, 0, ret, offset, array.length);
            offset += array.length;
        }
        return ret;
    }
}
public class Sendinblue { /** * Delete already existing users in the SendinBlue contacts from the list . * @ param { Object } data contains json objects as a key value pair from HashMap . * @ options data { Integer } id : Id of list to unlink users from it [ Mandatory ] * @ options data { Array } users : Email address of the already existing user ( s ) in the SendinBlue contacts to be modified . Example : " test @ example . net " . You can use commas to separate multiple users [ Mandatory ] */ public String delete_users_list ( Map < String , Object > data ) { } }
String id = data . get ( "id" ) . toString ( ) ; Gson gson = new Gson ( ) ; String json = gson . toJson ( data ) ; return delete ( "list/" + id + "/delusers" , json ) ;
public class ResourceUtil {

    /**
     * Returns the path of {@code file} relative to {@code dir} (always starting
     * with '/') if the first is a child of the second; otherwise returns null.
     *
     * @param file file to search
     * @param dir directory to search
     * @return the relative path, or null when file is not a descendant of dir
     *         (or the two resources use different resource-provider schemes)
     */
    public static String getPathToChild(Resource file, Resource dir) {
        // Resources from different providers (schemes) can never be parent/child.
        if (dir == null || !file.getResourceProvider().getScheme().equals(dir.getResourceProvider().getScheme()))
            return null;
        boolean isFile = file.isFile();
        String str = "/";
        // Walk up the parent chain, prepending each segment until we reach dir.
        while (file != null) {
            if (file.equals(dir)) {
                // The accumulator always carries a trailing '/'; strip it for files.
                if (isFile) return str.substring(0, str.length() - 1);
                return str;
            }
            str = "/" + file.getName() + str;
            file = file.getParentResource();
        }
        // Ran past the filesystem root without meeting dir: not a child.
        return null;
    }
}
public class CheckGlobalThis {

    /**
     * Since this pass reports errors only when a global {@code this} keyword
     * is encountered, there is no reason to traverse non global contexts.
     *
     * @param t the current traversal
     * @param n the node about to be visited
     * @param parent n's parent
     * @return true to descend into n, false to prune this subtree
     */
    @Override
    public boolean shouldTraverse(NodeTraversal t, Node n, Node parent) {
        if (n.isFunction()) {
            // Arrow functions automatically get the "this" value from the
            // enclosing scope. e.g. the "this" in
            //   Foo.prototype.getBar = () => this.bar;
            // is the global "this", not an instance of Foo.
            if (n.isArrowFunction()) {
                return true;
            }
            // Don't traverse functions that are constructors or have the @this
            // or @override annotation.
            JSDocInfo jsDoc = NodeUtil.getBestJSDocInfo(n);
            if (jsDoc != null
                    && (jsDoc.isConstructor() || jsDoc.isInterface() || jsDoc.hasThisType() || jsDoc.isOverride())) {
                return false;
            }
            // Don't traverse functions unless they would normally
            // be able to have a @this annotation associated with them. e.g.,
            //   var a = function() { };   // or
            //   function a() {}           // or
            //   a.x = function() {};      // or
            //   var a = {x: function() {}};
            Token pType = parent.getToken();
            if (!(pType == Token.BLOCK || pType == Token.SCRIPT || pType == Token.NAME || pType == Token.ASSIGN ||
                  // object literal keys
                  pType == Token.STRING_KEY)) {
                return false;
            }
            // Don't traverse functions that are getting lent to a prototype.
            Node grandparent = parent.getParent();
            if (NodeUtil.mayBeObjectLitKey(parent)) {
                JSDocInfo maybeLends = grandparent.getJSDocInfo();
                if (maybeLends != null && maybeLends.hasLendsName()
                        && maybeLends.getLendsName().getRoot().getString().endsWith(".prototype")) {
                    return false;
                }
            }
        }
        if (parent != null && parent.isAssign()) {
            Node lhs = parent.getFirstChild();
            if (n == lhs) {
                // Always traverse the left side of the assignment. To handle
                // nested assignments properly (e.g., (a = this).property = c;),
                // assignLhsChild should not be overridden.
                if (assignLhsChild == null) {
                    assignLhsChild = lhs;
                }
            } else {
                // Only traverse the right side if it's not an assignment to a prototype
                // property or subproperty.
                if (NodeUtil.isGet(lhs)) {
                    // Direct prototype assignment: Foo.prototype = ...
                    if (lhs.isGetProp() && lhs.getLastChild().getString().equals("prototype")) {
                        return false;
                    }
                    // Prototype subproperty assignment: Foo.prototype.bar = ...
                    Node llhs = lhs.getFirstChild();
                    if (llhs.isGetProp() && llhs.getLastChild().getString().equals("prototype")) {
                        return false;
                    }
                }
            }
        }
        return true;
    }
}
public class HangingExecutors {

    /**
     * finds ExecutorService objects that don't get a call to the terminating
     * methods, and thus, never appear to be shutdown properly (the threads
     * exist until shutdown is called)
     *
     * @param classContext the class context object of the currently parsed java class
     */
    @Override
    public void visitClassContext(ClassContext classContext) {
        // Run the companion local-variable detector over the class first.
        localHEDetector.visitClassContext(classContext);
        try {
            // Fresh per-class state for candidate and exempt executor fields.
            hangingFieldCandidates = new HashMap<>();
            exemptExecutors = new HashMap<>();
            parseFieldsForHangingCandidates(classContext);
            if (!hangingFieldCandidates.isEmpty()) {
                // Only pay for bytecode traversal when there is something to check.
                stack = new OpcodeStack();
                super.visitClassContext(classContext);
                reportHangingExecutorFieldBugs();
            }
        } finally {
            // Always release per-class state, even if analysis throws.
            stack = null;
            hangingFieldCandidates = null;
            exemptExecutors = null;
        }
    }
}
public class CProductPersistenceImpl { /** * Returns the last c product in the ordered set where uuid = & # 63 ; and companyId = & # 63 ; . * @ param uuid the uuid * @ param companyId the company ID * @ param orderByComparator the comparator to order the set by ( optionally < code > null < / code > ) * @ return the last matching c product * @ throws NoSuchCProductException if a matching c product could not be found */ @ Override public CProduct findByUuid_C_Last ( String uuid , long companyId , OrderByComparator < CProduct > orderByComparator ) throws NoSuchCProductException { } }
CProduct cProduct = fetchByUuid_C_Last ( uuid , companyId , orderByComparator ) ; if ( cProduct != null ) { return cProduct ; } StringBundler msg = new StringBundler ( 6 ) ; msg . append ( _NO_SUCH_ENTITY_WITH_KEY ) ; msg . append ( "uuid=" ) ; msg . append ( uuid ) ; msg . append ( ", companyId=" ) ; msg . append ( companyId ) ; msg . append ( "}" ) ; throw new NoSuchCProductException ( msg . toString ( ) ) ;
public class ResultIterator { /** * On query . * @ param m * the m * @ param client * the client */ private void onQuery ( EntityMetadata m , Client client ) { } }
String tableName = HBaseUtils . getHTableName ( m . getSchema ( ) , m . getTableName ( ) ) ; try { handler . readData ( tableName , m , null , translator . getStartRow ( ) , translator . getEndRow ( ) , getColumnsToOuput ( ) , ( FilterList ) translator . getFilters ( ) ) ; } catch ( IOException e ) { log . error ( e . getMessage ( ) ) ; throw new KunderaException ( "Error in connecting to database or some network problem. Caused by: " , e ) ; }
public class SetOperation { /** * Returns true if given Family id is one of the set operations * @ param id the given Family id * @ return true if given Family id is one of the set operations */ static boolean isValidSetOpID ( final int id ) { } }
final Family family = Family . idToFamily ( id ) ; final boolean ret = ( ( family == Family . UNION ) || ( family == Family . INTERSECTION ) || ( family == Family . A_NOT_B ) ) ; return ret ;
public class Json4SpoonGenerator { /** * Decorates a node with the affected operator , if any . * @ param context * @ param tree * @ param operations * @ return */ public JsonObject getJSONwithOperations ( TreeContext context , ITree tree , List < Operation > operations ) { } }
OperationNodePainter opNodePainter = new OperationNodePainter ( operations ) ; Collection < NodePainter > painters = new ArrayList < NodePainter > ( ) ; painters . add ( opNodePainter ) ; return getJSONwithCustorLabels ( context , tree , painters ) ;
public class PluralRulesLoader {
    /**
     * Gets the rule from the rulesId. If there is no rule for this rulesId,
     * return null.
     */
    public PluralRules getRulesForRulesId(String rulesId) {
        // synchronize on the map. release the lock temporarily while we build the rules.
        PluralRules rules = null;
        boolean hasRules; // Separate boolean because stored rules can be null.
        synchronized (rulesIdToRules) {
            hasRules = rulesIdToRules.containsKey(rulesId);
            if (hasRules) {
                rules = rulesIdToRules.get(rulesId); // can be null
            }
        }
        if (!hasRules) {
            // Cache miss: build the rule description from the resource bundle
            // outside the lock, since bundle loading may be slow.
            try {
                UResourceBundle pluralb = getPluralBundle();
                UResourceBundle rulesb = pluralb.get("rules");
                UResourceBundle setb = rulesb.get(rulesId);
                StringBuilder sb = new StringBuilder();
                for (int i = 0; i < setb.getSize(); ++i) {
                    UResourceBundle b = setb.get(i);
                    if (i > 0) {
                        sb.append("; ");
                    }
                    sb.append(b.getKey());
                    sb.append(": ");
                    sb.append(b.getString());
                }
                rules = PluralRules.parseDescription(sb.toString());
            } catch (ParseException e) {
                // Deliberately swallowed: a malformed description is cached as
                // null rather than surfaced to the caller.
            } catch (MissingResourceException e) {
                // Deliberately swallowed: an unknown rulesId is cached as null.
            }
            synchronized (rulesIdToRules) {
                if (rulesIdToRules.containsKey(rulesId)) {
                    // Another thread populated the entry while we were
                    // building; prefer its value to keep the cache consistent.
                    rules = rulesIdToRules.get(rulesId);
                } else {
                    rulesIdToRules.put(rulesId, rules); // can be null
                }
            }
        }
        return rules;
    }
}
public class SelectorReplayDispatcher { /** * check if mime - type detection is suggested for mimeType . * @ param mimeType mime - type to test ( must not be null / empty / " unk " ) * @ return { @ code true } if mime - type should be determined * by looking into Resource . */ protected boolean shouldDetectMimeType ( String mimeType ) { } }
for ( String prefix : untrustfulMimeTypes ) { if ( mimeType . startsWith ( prefix ) ) return true ; } return false ;
public class VersionInfo {
    /**
     * Returns a VersionInfo that reflects any inherited version information.
     *
     * @return merged version information.
     */
    public VersionInfo merge() {
        // A pure reference delegates merging to the element it points at.
        if (isReference()) {
            final VersionInfo refVersion = (VersionInfo) getCheckedRef(
                    VersionInfo.class, "VersionInfo");
            return refVersion.merge();
        }
        Reference currentRef = this.getExtends();
        if (currentRef == null) {
            // Nothing inherited; this instance already stands on its own.
            return this;
        }
        // Walk the "extends" chain, collecting each VersionInfo in order.
        // The stack doubles as the visited set for cycle detection.
        final Vector<VersionInfo> stack = new Vector<>(5);
        stack.addElement(this);
        while (currentRef != null) {
            final Object obj = currentRef.getReferencedObject(getProject());
            if (obj instanceof VersionInfo) {
                VersionInfo current = (VersionInfo) obj;
                if (current.isReference()) {
                    // Resolve a reference-only element to its real target.
                    current = (VersionInfo) current.getCheckedRef(
                            VersionInfo.class, "VersionInfo");
                }
                if (stack.contains(current)) {
                    throw this.circularReference();
                }
                stack.addElement(current);
                currentRef = current.getExtends();
            } else {
                throw new BuildException("Referenced element "
                        + currentRef.getRefId() + " is not a versioninfo.");
            }
        }
        // Build the merged view from the collected chain (nearest first).
        return new VersionInfo(stack);
    }
}
public class Complex { /** * Returns { @ code true } , if at least on of the sub - constraints still has to process a case i . e . * there is at least one sub - constraint with { @ code hasNextCase ( ) = = * true } . * @ return true , if one of the sub - constrains still has cases */ @ Override public boolean hasNextCase ( ) { } }
// this method only returns true if dependent values are grouped if ( ! dependent ) { return false ; } for ( Constraint c : constraints ) { if ( c . hasNextCase ( ) ) { return true ; } } return false ;
public class DynamicOutputBuffer {
    /**
     * Puts the given string as UTF-8 into the buffer at the given position.
     * This method does not increase the write position.
     *
     * @param pos the position where to put the string
     * @param s the string to put
     * @return the number of UTF-8 bytes put
     */
    public int putUTF8(int pos, String s) {
        // Small scratch buffer used when an encoded character straddles an
        // internal buffer boundary; lazily allocated.
        ByteBuffer minibb = null;
        CharsetEncoder enc = getUTF8Encoder();
        CharBuffer in = CharBuffer.wrap(s);
        int pos2 = pos; // running absolute write position
        ByteBuffer bb = getBuffer(pos2);
        int index = pos2 % _bufferSize; // offset of pos2 within bb
        bb.position(index);
        while (in.remaining() > 0) {
            CoderResult res = enc.encode(in, bb, true);
            // flush minibb first
            if (bb == minibb) {
                // Copy the scratch bytes back byte-by-byte so they land across
                // the buffer boundary correctly.
                bb.flip();
                while (bb.remaining() > 0) {
                    putByte(pos2, bb.get());
                    ++pos2;
                }
            } else {
                // Advance by however many bytes the encoder produced.
                pos2 += bb.position() - index;
            }
            if (res.isOverflow()) {
                if (bb.remaining() > 0) {
                    // exceeded buffer boundaries; write to a small temporary buffer
                    if (minibb == null) {
                        minibb = ByteBuffer.allocate(4);
                    }
                    minibb.clear();
                    bb = minibb;
                    index = 0;
                } else {
                    // Current buffer exactly full: continue in the next one.
                    bb = getBuffer(pos2);
                    index = pos2 % _bufferSize;
                    bb.position(index);
                }
            } else if (res.isError()) {
                try {
                    res.throwException();
                } catch (CharacterCodingException e) {
                    throw new RuntimeException("Could not encode string", e);
                }
            }
        }
        adaptSize(pos2);
        return pos2 - pos;
    }
}
public class CodecUtil { /** * Term prefix . * @ param term * the term * @ return the string */ public static String termPrefix ( String term ) { } }
int i = term . indexOf ( MtasToken . DELIMITER ) ; String prefix = term ; if ( i >= 0 ) { prefix = term . substring ( 0 , i ) ; } return prefix . replace ( "\u0000" , "" ) ;
public class SVGPath { /** * Quadratic Bezier line to the given coordinates . * @ param c1x first control point x * @ param c1y first control point y * @ param x new coordinates * @ param y new coordinates * @ return path object , for compact syntax . */ public SVGPath quadTo ( double c1x , double c1y , double x , double y ) { } }
return append ( PATH_QUAD_TO ) . append ( c1x ) . append ( c1y ) . append ( x ) . append ( y ) ;
public class AbstractSARLLaunchConfigurationDelegate { /** * Replies the SRE installation to be used for the given configuration . * @ param configuration the configuration to check . * @ param configAccessor the accessor to the SRE configuration . * @ param projectAccessor the accessor to the Java project . * @ return the SRE install . * @ throws CoreException if impossible to get the SRE . */ protected static ISREInstall getSREInstallFor ( ILaunchConfiguration configuration , ILaunchConfigurationAccessor configAccessor , IJavaProjectAccessor projectAccessor ) throws CoreException { } }
assert configAccessor != null ; assert projectAccessor != null ; final ISREInstall sre ; if ( configAccessor . getUseProjectSREFlag ( configuration ) ) { sre = getProjectSpecificSRE ( configuration , true , projectAccessor ) ; } else if ( configAccessor . getUseSystemSREFlag ( configuration ) ) { sre = SARLRuntime . getDefaultSREInstall ( ) ; verifySREValidity ( sre , sre . getId ( ) ) ; } else { final String runtime = configAccessor . getSREId ( configuration ) ; sre = SARLRuntime . getSREFromId ( runtime ) ; verifySREValidity ( sre , runtime ) ; } if ( sre == null ) { throw new CoreException ( SARLEclipsePlugin . getDefault ( ) . createStatus ( IStatus . ERROR , Messages . SARLLaunchConfigurationDelegate_0 ) ) ; } return sre ;
public class MorphiaUtils {
    /**
     * Joins strings with the given delimiter.
     *
     * @param strings the strings to join; must not be null
     * @param delimiter the delimiter placed between consecutive elements
     * @return the joined string; empty for an empty list
     */
    public static String join(final List<String> strings, final char delimiter) {
        // String.join reproduces the previous StringBuilder loop exactly:
        // no leading/trailing delimiter, empty string for an empty list,
        // and null elements rendered as "null".
        return String.join(String.valueOf(delimiter), strings);
    }
}
public class TransformedRenditionHandler { /** * Searches for the biggest web - enabled rendition that matches the crop dimensions width and height or is bigger . * @ param candidates * @ return Rendition or null if no match found */ private VirtualTransformedRenditionMetadata getCropRendition ( NavigableSet < RenditionMetadata > candidates ) { } }
RenditionMetadata original = getOriginalRendition ( ) ; if ( original == null ) { return null ; } Double scaleFactor = getCropScaleFactor ( ) ; CropDimension scaledCropDimension = new CropDimension ( Math . round ( cropDimension . getLeft ( ) * scaleFactor ) , Math . round ( cropDimension . getTop ( ) * scaleFactor ) , Math . round ( cropDimension . getWidth ( ) * scaleFactor ) , Math . round ( cropDimension . getHeight ( ) * scaleFactor ) ) ; return new VirtualTransformedRenditionMetadata ( original . getRendition ( ) , rotateMapWidth ( scaledCropDimension . getWidth ( ) , scaledCropDimension . getHeight ( ) ) , rotateMapHeight ( scaledCropDimension . getWidth ( ) , scaledCropDimension . getHeight ( ) ) , scaledCropDimension , rotation ) ;
public class AsteriskChannelImpl { /** * Sets the caller id of this channel . * @ param callerId the caller id of this channel . */ void setCallerId ( final CallerId callerId ) { } }
final CallerId oldCallerId = this . callerId ; this . callerId = callerId ; firePropertyChange ( PROPERTY_CALLER_ID , oldCallerId , callerId ) ;
public class AwsSecurityFindingFilters { /** * An ISO8601 - formatted timestamp that indicates when the finding record was last updated by the security findings * provider . * < b > NOTE : < / b > This method appends the values to the existing list ( if any ) . Use * { @ link # setUpdatedAt ( java . util . Collection ) } or { @ link # withUpdatedAt ( java . util . Collection ) } if you want to * override the existing values . * @ param updatedAt * An ISO8601 - formatted timestamp that indicates when the finding record was last updated by the security * findings provider . * @ return Returns a reference to this object so that method calls can be chained together . */ public AwsSecurityFindingFilters withUpdatedAt ( DateFilter ... updatedAt ) { } }
if ( this . updatedAt == null ) { setUpdatedAt ( new java . util . ArrayList < DateFilter > ( updatedAt . length ) ) ; } for ( DateFilter ele : updatedAt ) { this . updatedAt . add ( ele ) ; } return this ;
public class SpringCamelContextFactory { /** * Create a { @ link SpringCamelContext } list from the given URL */ public static List < SpringCamelContext > createCamelContextList ( URL contextUrl , ClassLoader classsLoader ) throws Exception { } }
SpringCamelContextBootstrap bootstrap = new SpringCamelContextBootstrap ( contextUrl , classsLoader ) ; return bootstrap . createSpringCamelContexts ( ) ;
public class SimpleBase { /** * Returns a matrix which is the result of an element by element exp of ' this ' * c < sub > i , j < / sub > = Math . log ( a < sub > i , j < / sub > ) * @ return The element by element power of ' this ' and ' b ' . */ public T elementLog ( ) { } }
T c = createLike ( ) ; ops . elementLog ( mat , c . mat ) ; return c ;
public class Server {
    /**
     * Accepts any new connections and reads or writes any pending data for the current connections.
     *
     * @param timeout Wait for up to the specified milliseconds for a connection to be ready to process. May be zero to return
     *            immediately if there are no connections to process.
     */
    public void update(int timeout) throws IOException {
        updateThread = Thread.currentThread();
        synchronized (updateLock) { // Blocks to avoid a select while the selector is used to bind the server connection.
        }
        long startTime = System.currentTimeMillis();
        int select = 0;
        if (timeout > 0) {
            select = selector.select(timeout);
        } else {
            select = selector.selectNow();
        }
        if (select == 0) {
            emptySelects++;
            if (emptySelects == 100) {
                emptySelects = 0;
                // NIO freaks and returns immediately with 0 sometimes, so try to keep from hogging the CPU.
                long elapsedTime = System.currentTimeMillis() - startTime;
                try {
                    if (elapsedTime < 25) Thread.sleep(25 - elapsedTime);
                } catch (InterruptedException ex) {
                    // Deliberately ignored: the select loop simply resumes.
                }
            }
        } else {
            emptySelects = 0;
            Set<SelectionKey> keys = selector.selectedKeys();
            synchronized (keys) {
                UdpConnection udp = this.udp;
                // Process every ready key; each is removed as it is consumed.
                outer:
                for (Iterator<SelectionKey> iter = keys.iterator(); iter.hasNext();) {
                    keepAlive();
                    SelectionKey selectionKey = iter.next();
                    iter.remove();
                    Connection fromConnection = (Connection) selectionKey.attachment();
                    try {
                        int ops = selectionKey.readyOps();
                        if (fromConnection != null) { // Must be a TCP read or write operation.
                            if (udp != null && fromConnection.udpRemoteAddress == null) {
                                // UDP is enabled but this connection never registered UDP; drop it.
                                fromConnection.close();
                                continue;
                            }
                            if ((ops & SelectionKey.OP_READ) == SelectionKey.OP_READ) {
                                try {
                                    // Drain all fully-buffered objects from the TCP stream.
                                    while (true) {
                                        Object object = fromConnection.tcp.readObject(fromConnection);
                                        if (object == null) break;
                                        if (DEBUG) {
                                            String objectString = object == null ? "null" : object.getClass().getSimpleName();
                                            if (!(object instanceof FrameworkMessage)) {
                                                debug("kryonet", fromConnection + " received TCP: " + objectString);
                                            } else if (TRACE) {
                                                trace("kryonet", fromConnection + " received TCP: " + objectString);
                                            }
                                        }
                                        fromConnection.notifyReceived(object);
                                    }
                                } catch (IOException ex) {
                                    if (TRACE) {
                                        trace("kryonet", "Unable to read TCP from: " + fromConnection, ex);
                                    } else if (DEBUG) {
                                        debug("kryonet", fromConnection + " update: " + ex.getMessage());
                                    }
                                    fromConnection.close();
                                } catch (KryoNetException ex) {
                                    if (ERROR) error("kryonet", "Error reading TCP from connection: " + fromConnection, ex);
                                    fromConnection.close();
                                }
                            }
                            if ((ops & SelectionKey.OP_WRITE) == SelectionKey.OP_WRITE) {
                                try {
                                    fromConnection.tcp.writeOperation();
                                } catch (IOException ex) {
                                    if (TRACE) {
                                        trace("kryonet", "Unable to write TCP to connection: " + fromConnection, ex);
                                    } else if (DEBUG) {
                                        debug("kryonet", fromConnection + " update: " + ex.getMessage());
                                    }
                                    fromConnection.close();
                                }
                            }
                            continue;
                        }
                        if ((ops & SelectionKey.OP_ACCEPT) == SelectionKey.OP_ACCEPT) {
                            // New inbound TCP connection on the server socket.
                            ServerSocketChannel serverChannel = this.serverChannel;
                            if (serverChannel == null) continue;
                            try {
                                SocketChannel socketChannel = serverChannel.accept();
                                if (socketChannel != null) acceptOperation(socketChannel);
                            } catch (IOException ex) {
                                if (DEBUG) debug("kryonet", "Unable to accept new connection.", ex);
                            }
                            continue;
                        }
                        // Must be a UDP read operation.
                        if (udp == null) {
                            selectionKey.channel().close();
                            continue;
                        }
                        InetSocketAddress fromAddress;
                        try {
                            fromAddress = udp.readFromAddress();
                        } catch (IOException ex) {
                            if (WARN) warn("kryonet", "Error reading UDP data.", ex);
                            continue;
                        }
                        if (fromAddress == null) continue;
                        // Map the datagram's source address back to a known connection, if any.
                        Connection[] connections = this.connections;
                        for (int i = 0, n = connections.length; i < n; i++) {
                            Connection connection = connections[i];
                            if (fromAddress.equals(connection.udpRemoteAddress)) {
                                fromConnection = connection;
                                break;
                            }
                        }
                        Object object;
                        try {
                            object = udp.readObject(fromConnection);
                        } catch (KryoNetException ex) {
                            if (WARN) {
                                if (fromConnection != null) {
                                    if (ERROR) error("kryonet", "Error reading UDP from connection: " + fromConnection, ex);
                                } else
                                    warn("kryonet", "Error reading UDP from unregistered address: " + fromAddress, ex);
                            }
                            continue;
                        }
                        if (object instanceof FrameworkMessage) {
                            if (object instanceof RegisterUDP) {
                                // Store the fromAddress on the connection and reply over TCP with a RegisterUDP to indicate success.
                                int fromConnectionID = ((RegisterUDP) object).connectionID;
                                Connection connection = pendingConnections.remove(fromConnectionID);
                                if (connection != null) {
                                    if (connection.udpRemoteAddress != null) continue outer;
                                    connection.udpRemoteAddress = fromAddress;
                                    addConnection(connection);
                                    connection.sendTCP(new RegisterUDP());
                                    if (DEBUG)
                                        debug("kryonet", "Port " + udp.datagramChannel.socket().getLocalPort()
                                                + "/UDP connected to: " + fromAddress);
                                    connection.notifyConnected();
                                    continue;
                                }
                                if (DEBUG)
                                    debug("kryonet", "Ignoring incoming RegisterUDP with invalid connection ID: " + fromConnectionID);
                                continue;
                            }
                            if (object instanceof DiscoverHost) {
                                // Host discovery probe: optionally answer via the handler.
                                try {
                                    boolean responseSent = discoveryHandler.onDiscoverHost(udp.datagramChannel, fromAddress, serialization);
                                    if (DEBUG && responseSent) debug("kryonet", "Responded to host discovery from: " + fromAddress);
                                } catch (IOException ex) {
                                    if (WARN) warn("kryonet", "Error replying to host discovery from: " + fromAddress, ex);
                                }
                                continue;
                            }
                        }
                        if (fromConnection != null) {
                            if (DEBUG) {
                                String objectString = object == null ? "null" : object.getClass().getSimpleName();
                                if (object instanceof FrameworkMessage) {
                                    if (TRACE) trace("kryonet", fromConnection + " received UDP: " + objectString);
                                } else
                                    debug("kryonet", fromConnection + " received UDP: " + objectString);
                            }
                            fromConnection.notifyReceived(object);
                            continue;
                        }
                        if (DEBUG) debug("kryonet", "Ignoring UDP from unregistered address: " + fromAddress);
                    } catch (CancelledKeyException ex) {
                        // The key was invalidated while being processed; clean up whichever side we have.
                        if (fromConnection != null)
                            fromConnection.close();
                        else
                            selectionKey.channel().close();
                    }
                }
            }
        }
        // Housekeeping pass: time out dead connections, send keep-alives, report idleness.
        long time = System.currentTimeMillis();
        Connection[] connections = this.connections;
        for (int i = 0, n = connections.length; i < n; i++) {
            Connection connection = connections[i];
            if (connection.tcp.isTimedOut(time)) {
                if (DEBUG) debug("kryonet", connection + " timed out.");
                connection.close();
            } else {
                if (connection.tcp.needsKeepAlive(time)) connection.sendTCP(FrameworkMessage.keepAlive);
            }
            if (connection.isIdle()) connection.notifyIdle();
        }
    }
}
public class Functions { /** * Create a { @ link Map } by applying a function that extracts the key from an array of values . * @ param key the key extraction function . * @ param values the values of the map ( from which the keys are extracted ) . * @ param < K > the type of the keys . * @ param < V > the type of the values . * @ return a { @ link Map } from the keys of the values to the values . */ @ SafeVarargs public static < K , V > Map < K , V > map ( Function < V , K > key , V ... values ) { } }
return values == null || values . length == 0 ? emptyMap ( ) : map ( asList ( values ) , key ) ;
public class ActivityUtils { /** * Checks if a package is installed . * @ param context Context to be used to verify the existence of the package . * @ param packageName Package name to be searched . * @ return true if the package is discovered ; false otherwise */ public static boolean isPackageInstalled ( Context context , String packageName ) { } }
try { context . getPackageManager ( ) . getApplicationInfo ( packageName , 0 ) ; return true ; } catch ( Exception e ) { return false ; }
public class StatsTraceContext { /** * Factory method for the server - side . */ public static StatsTraceContext newServerContext ( List < ? extends ServerStreamTracer . Factory > factories , String fullMethodName , Metadata headers ) { } }
if ( factories . isEmpty ( ) ) { return NOOP ; } StreamTracer [ ] tracers = new StreamTracer [ factories . size ( ) ] ; for ( int i = 0 ; i < tracers . length ; i ++ ) { tracers [ i ] = factories . get ( i ) . newServerStreamTracer ( fullMethodName , headers ) ; } return new StatsTraceContext ( tracers ) ;
public class VoldemortBuildAndPushJob {
    /**
     * This function takes care of interrogating the servers to know which optional
     * features are supported and enabled, including:
     * 1) block-level compression,
     * 2) high-availability push,
     * 3) build primary replicas only.
     *
     * TODO: Eventually, it'd be nice to migrate all of these code paths to the new simpler API:
     * {@link AdminClient.MetadataManagementOperations#getServerConfig(int, java.util.Set)}
     *
     * This function mutates the internal state of the job accordingly.
     */
    private void negotiateJobSettingsWithServers() {
        // 1. Get block-level compression settings
        // FIXME: Currently this code requests only one cluster for its supported compression codec.
        log.info("Requesting block-level compression codec expected by Server");
        String chosenCodec = null;
        List<String> supportedCodecs;
        try {
            supportedCodecs = adminClientPerCluster.get(clusterURLs.get(0))
                    .readonlyOps.getSupportedROStorageCompressionCodecs();
            String codecList = "[ ";
            for (String str : supportedCodecs) {
                codecList += str + " ";
            }
            codecList += "]";
            log.info("Server responded with block-level compression codecs: " + codecList);
            /*
             * TODO for now only checking if there is a match between the server
             * supported codec and the one that we support. Later this method can be
             * extended to add more compression types or pick the first type
             * returned by the server.
             */
            for (String codecStr : supportedCodecs) {
                if (codecStr.toUpperCase(Locale.ENGLISH).equals(KeyValueWriter.COMPRESSION_CODEC)) {
                    chosenCodec = codecStr;
                    break;
                }
            }
        } catch (Exception e) {
            log.error("Exception thrown when requesting for supported block-level compression codecs. "
                    + "Server might be running in a older version. Exception: " + e.getMessage());
            // We will continue without block-level compression enabled
        }
        if (chosenCodec != null) {
            log.info("Using block-level compression codec: " + chosenCodec);
            this.props.put(REDUCER_OUTPUT_COMPRESS, "true");
            this.props.put(REDUCER_OUTPUT_COMPRESS_CODEC, chosenCodec);
        } else {
            log.info("Using no block-level compression");
        }
        // 2. Get High-Availability settings
        this.haSettingsPerCluster = Maps.newHashMap();
        if (!pushHighAvailability) {
            log.info("pushHighAvailability is disabled by the job config.");
        } else {
            // HA is enabled by the BnP job config
            for (String clusterUrl : clusterURLs) {
                try {
                    VAdminProto.GetHighAvailabilitySettingsResponse serverSettings =
                            adminClientPerCluster.get(clusterUrl).readonlyOps.getHighAvailabilitySettings();
                    this.haSettingsPerCluster.put(clusterUrl, serverSettings);
                } catch (UninitializedMessageException e) {
                    // Not printing out the exception in the logs as that is a benign error.
                    log.error("The server does not support HA (introduced in release 1.9.20), so "
                            + "pushHighAvailability will be DISABLED on cluster: " + clusterUrl);
                } catch (Exception e) {
                    log.error("Got exception while trying to determine pushHighAvailability settings on cluster: "
                            + clusterUrl, e);
                }
            }
        }
        // 3. Get "build.primary.replicas.only" setting
        // Enabled only if EVERY destination cluster validates the expected config.
        Map<String, String> expectedConfig = Maps.newHashMap();
        expectedConfig.put(VoldemortConfig.READONLY_BUILD_PRIMARY_REPLICAS_ONLY, Boolean.toString(true));
        this.buildPrimaryReplicasOnly = true;
        for (String clusterUrl : clusterURLs) {
            VAdminProto.GetHighAvailabilitySettingsResponse serverSettings = haSettingsPerCluster.get(clusterUrl);
            int maxNodeFailuresForCluster = 0;
            if (serverSettings != null) {
                maxNodeFailuresForCluster = serverSettings.getMaxNodeFailure();
            }
            if (!adminClientPerCluster.get(clusterUrl).metadataMgmtOps
                    .validateServerConfig(expectedConfig, maxNodeFailuresForCluster)) {
                log.info("'" + BUILD_PRIMARY_REPLICAS_ONLY
                        + "' is not supported on this destination cluster: " + clusterUrl);
                this.buildPrimaryReplicasOnly = false;
            }
        }
    }
}
public class XMLEncoder { /** * Writes an XML declaration with double quotes . * @ param out the character stream to write to , not < code > null < / code > . * @ param quotationMark the quotationMark to use , either < code > ' \ ' ' < / code > or < code > ' " ' < / code > . * @ throws IllegalArgumentException if * < code > quotationMark ! = ' \ ' ' & amp ; & amp ; quotationMark ! = ' " ' < / code > * @ throws NullPointerException if < code > out = = null < / code > . * @ throws IOException if an I / O error occurs . * @ since XMLenc 0.54 */ public void declaration ( Writer out , char quotationMark ) throws IllegalArgumentException , NullPointerException , IOException { } }
if ( quotationMark == '"' ) { out . write ( _declarationDoubleQuotes ) ; } else if ( quotationMark == '\'' ) { out . write ( _declarationSingleQuotes ) ; }
public class Cursor { /** * Fetches single entity or null . * @ return Single entity if result set have one more , otherwise null . * @ throws InstantiationException * @ throws IllegalAccessException * @ throws SQLException */ public T fetchSingleOrNull ( ) throws InstantiationException , IllegalAccessException , SQLException { } }
if ( ! finished ) { T result = primitive ? fetchSinglePrimitiveInternal ( ) : fetchSingleInternal ( ) ; next ( ) ; return result ; } else { return null ; }
public class JSONWriter { /** * Begin appending a new array . < p > * All values until the balancing * < code > endArray < / code > will be appended to this array . The * < code > endArray < / code > method must be called to mark the array ' s end . < p > * @ return this * @ throws JSONException if the nesting is too deep , or if the object is * started in the wrong place ( for example as a key or after the end of the * outermost array or object ) */ public JSONWriter array ( ) throws JSONException { } }
if ( ( m_mode == 'i' ) || ( m_mode == 'o' ) || ( m_mode == 'a' ) ) { push ( 'a' ) ; append ( "[" ) ; m_comma = false ; return this ; } throw new JSONException ( "Misplaced array." ) ;
public class JmsUtils { /** * Close a message producer . * @ param producer */ public static void closeQuietly ( final MessageProducer producer ) { } }
if ( producer != null ) { try { producer . close ( ) ; } catch ( JMSException je ) { if ( je . getCause ( ) instanceof InterruptedException ) { LOG . trace ( "ActiveMQ caught and wrapped InterruptedException" ) ; } if ( je . getCause ( ) instanceof InterruptedIOException ) { LOG . trace ( "ActiveMQ caught and wrapped InterruptedIOException" ) ; } else { LOG . warnDebug ( je , "While closing producer" ) ; } } }
public class AutoBytePool { /** * 设置缓冲区大小 * @ param size 大小 */ private void resizeBuf ( int size ) { } }
int capacity ; if ( size >= _capacity * 2 ) { capacity = size ; } else { capacity = 1 ; while ( capacity < size ) { capacity <<= 1 ; } } byte [ ] buf = new byte [ capacity ] ; if ( _size > 0 ) { System . arraycopy ( _buf , 0 , buf , 0 , _size ) ; } _buf = buf ; _capacity = capacity ;
public class UserHousingComplexUnit { /** * Returns the entity with the required fields for an insert set . * @ return */ public UserHousingComplexUnit instantiateForInsert ( ) { } }
UserHousingComplexUnit entity = new UserHousingComplexUnit ( ) ; entity . setIsDeleted ( Boolean . FALSE ) ; return entity ;
public class SAXSerializer { /** * Generate a text event . * @ param mRtx * Read Transaction . */ private void generateText ( final INodeReadTrx paramRtx ) { } }
try { mContHandler . characters ( paramRtx . getValueOfCurrentNode ( ) . toCharArray ( ) , 0 , paramRtx . getValueOfCurrentNode ( ) . length ( ) ) ; } catch ( final SAXException exc ) { exc . printStackTrace ( ) ; }
public class IOUtil { /** * Recurse in the folder to get the list all files and folders of all non svn files * @ param folder the folder to parse */ @ SuppressWarnings ( "unchecked" ) public Collection < String > listFolders ( File folder ) { } }
IOFileFilter ioFileFilter = FileFilterUtils . makeSVNAware ( FileFilterUtils . makeCVSAware ( FileFilterUtils . trueFileFilter ( ) ) ) ; Collection < File > files = FileUtils . listFiles ( folder , FileFilterUtils . fileFileFilter ( ) , ioFileFilter ) ; Set < String > ret = newTreeSet ( ) ; for ( File file : files ) { ret . add ( file . getParentFile ( ) . getAbsolutePath ( ) ) ; } return ret ;
public class DateTimeUtils { /** * Add / Subtract the specified amount of days to the given { @ link Calendar } . * The returned { @ link Calendar } has its fields synced . * @ param origin * @ param value * @ return * @ since 0.9.2 */ public static Calendar addDays ( Calendar origin , int value ) { } }
Calendar cal = sync ( ( Calendar ) origin . clone ( ) ) ; cal . add ( Calendar . DATE , value ) ; return sync ( cal ) ;
public class SoapWebServiceAdapter {
    /**
     * Overriding this method affords the opportunity to parse the response and
     * populate process variables as needed.
     */
    @Override
    public void onSuccess(String response) throws ActivityException, ConnectionException, AdapterException {
        try {
            // set the variable value based on the unwrapped soap content
            soapResponse = getSoapResponse(response);
            Node childElem = unwrapSoapResponse(soapResponse);
            String responseXml = DomHelper.toXml(childElem);
            String responseVarName = getAttributeValue(RESPONSE_VARIABLE);
            if (responseVarName == null)
                throw new AdapterException("Missing attribute: " + RESPONSE_VARIABLE);
            String responseVarType = getParameterType(responseVarName);
            // The target variable must be a document reference; plain types cannot
            // hold the unwrapped SOAP payload.
            if (!VariableTranslator.isDocumentReferenceVariable(getPackage(), responseVarType))
                throw new AdapterException("Response variable must be a DocumentReference: " + responseVarName);
            if (responseVarType.equals(StringDocument.class.getName())) {
                // StringDocument: store the raw XML text.
                setParameterValueAsDocument(responseVarName, responseVarType, responseXml);
            } else {
                // Other document types: delegate DOM-to-object conversion to the
                // type's registered translator.
                com.centurylink.mdw.variable.VariableTranslator varTrans =
                        VariableTranslator.getTranslator(getPackage(), responseVarType);
                if (!(varTrans instanceof XmlDocumentTranslator))
                    throw new AdapterException("Unsupported response variable type: " + responseVarType
                            + " (must implement XmlDocumentTranslator)");
                XmlDocumentTranslator xmlDocTrans = (XmlDocumentTranslator) varTrans;
                Object responseObj = xmlDocTrans.fromDomNode(childElem);
                setParameterValueAsDocument(responseVarName, responseVarType, responseObj);
            }
        } catch (Exception ex) {
            // Wrap any failure as an ActivityException, preserving the cause.
            throw new ActivityException(ex.getMessage(), ex);
        }
    }
}
public class DefaultDataStore { /** * Convenience call to { @ link # resolve ( Record , ReadConsistency , boolean ) } which always schedules asynchronous * compaction is applicable . */ private Resolved resolve ( Record record , ReadConsistency consistency ) { } }
return resolve ( record , consistency , true ) ;
public class BasicEvaluationCtx { /** * Private helper that figures out if we need to resolve new values , * and returns either the current moment ( if we ' re not caching ) or * - 1 ( if we are caching ) */ private synchronized Date dateTimeHelper ( ) { } }
// if we already have current values , then we can stop ( note this // always means that we ' re caching ) if ( currentTime != null ) return null ; // get the current moment Date time = new Date ( ) ; // if we ' re not caching then we just return the current moment if ( ! useCachedEnvValues ) { return time ; } else { // we ' re caching , so resolve all three values , making sure // to use clean copies of the date object since it may be // modified when creating the attributes currentTime = new TimeAttribute ( time ) ; currentDate = new DateAttribute ( time ) ; currentDateTime = new DateTimeAttribute ( time ) ; return null ; }
public class UtilFile {
    /**
     * Normalizes an NMT path coming from XML responses: some full paths are
     * reported with a "file://" URI prefix (e.g. "file:///opt/...") while
     * others are already plain absolute paths (e.g. "/opt/...").
     *
     * @param fullPath the raw path, optionally prefixed with "file://"
     * @return the path with a leading "file://" prefix removed; other paths
     *         are returned unchanged
     */
    public static String getNmtAbsolutePath(String fullPath) {
        // Strip only the leading prefix. The previous String.replace() call
        // removed EVERY occurrence of "file://" anywhere in the string, not
        // just the scheme at the front.
        if (fullPath.startsWith("file://")) {
            return fullPath.substring("file://".length());
        }
        return fullPath;
    }
}
public class InterfaceService { /** * Allows to smoothly close the application by hiding the current screen and calling * { @ link com . badlogic . gdx . Application # exit ( ) } . */ public void exitApplication ( ) { } }
if ( currentController != null ) { currentController . hide ( Actions . sequence ( hidingActionProvider . provideAction ( currentController , null ) , Actions . run ( CommonActionRunnables . getApplicationClosingRunnable ( ) ) ) ) ; } else { Gdx . app . exit ( ) ; }
public class SoftDeleteHandler { /** * Set up / do the local criteria . * @ param strbFilter The SQL query string to add to . * @ param bIncludeFileName Include the file name with this query ? * @ param vParamList The param list to add the raw data to ( for prepared statements ) . * @ return True if you should not skip this record ( does a check on the local data ) . */ public boolean doLocalCriteria ( StringBuffer strbFilter , boolean bIncludeFileName , Vector < BaseField > vParamList ) { } }
boolean bDontSkip = super . doLocalCriteria ( strbFilter , bIncludeFileName , vParamList ) ; if ( bDontSkip == true ) { if ( m_bFilterThisRecord ) bDontSkip = ! this . isRecordSoftDeleted ( ) ; // If set , skip it ! } return bDontSkip ; // Don ' t skip ( no criteria )
public class AbstractStandardTransformationOperation { /** * Generates a matcher for the given valueString with the given regular expression . */ protected Matcher generateMatcher ( String regex , String valueString ) throws TransformationOperationException { } }
if ( regex == null ) { throw new TransformationOperationException ( "No regex defined. The step will be skipped." ) ; } try { Pattern pattern = Pattern . compile ( regex ) ; return pattern . matcher ( valueString ) ; } catch ( PatternSyntaxException e ) { String message = String . format ( "Given regex string %s can't be compiled. The step will be skipped." , regex ) ; logger . warn ( message ) ; throw new TransformationOperationException ( message ) ; }
public class BaseCrawler { /** * If the HTTP HEAD request was redirected , it returns the final redirected URL . If not , it * returns the original URL of the candidate . * @ param context the current HTTP client context * @ param candidateUrl the URL of the candidate * @ return the final response URL */ private static String getFinalResponseUrl ( final HttpClientContext context , final String candidateUrl ) { } }
List < URI > redirectLocations = context . getRedirectLocations ( ) ; if ( redirectLocations != null ) { return redirectLocations . get ( redirectLocations . size ( ) - 1 ) . toString ( ) ; } return candidateUrl ;
public class BucketManager { /** * 复制文件 , 要求空间在同一账号下 , 可以设置force参数为true强行覆盖空间已有同名文件 * @ param fromBucket 源空间名称 * @ param fromFileKey 源文件名称 * @ param toBucket 目的空间名称 * @ param toFileKey 目的文件名称 * @ param force 强制覆盖空间中已有同名 ( 和 toFileKey 相同 ) 的文件 * @ throws QiniuException */ public Response copy ( String fromBucket , String fromFileKey , String toBucket , String toFileKey , boolean force ) throws QiniuException { } }
String from = encodedEntry ( fromBucket , fromFileKey ) ; String to = encodedEntry ( toBucket , toFileKey ) ; String path = String . format ( "/copy/%s/%s/force/%s" , from , to , force ) ; return rsPost ( fromBucket , path , null ) ;
public class SegmentFelzenszwalbHuttenlocher04 {
    /**
     * Look at the remaining regions and if there are any small ones merge them
     * into a larger region.
     *
     * <p>Walks the edges that were not matched during segmentation; whenever an
     * edge connects two distinct regions and at least one of them is smaller
     * than {@code minimumSize}, the two regions are merged (B is pointed at A
     * and A's size is updated).</p>
     */
    protected void mergeSmallRegions() {
        for (int i = 0; i < edgesNotMatched.size(); i++) {
            Edge e = edgesNotMatched.get(i);

            // resolve the root segment for each end point of the edge
            int rootA = find(e.indexA);
            int rootB = find(e.indexB);

            // see if they are already part of the same segment
            if (rootA == rootB)
                continue;

            int sizeA = regionSize.get(rootA);
            int sizeB = regionSize.get(rootB);

            // merge if one of the regions is too small
            if (sizeA < minimumSize || sizeB < minimumSize) {
                // Point everything towards rootA
                graph.data[e.indexB] = rootA;
                graph.data[rootB] = rootA;
                // Update the size of regionA
                regionSize.data[rootA] = sizeA + sizeB;
            }
        }
    }
}
public class DownloadProperties { /** * Builds a new DownloadProperties for downloading Galaxy from galaxy - dist . * @ param destination The destination directory to store Galaxy , null if a directory * should be chosen by default . * @ param revision The revision to use for Galaxy . * @ return A new DownloadProperties for downloading Galaxy from galaxy - dist . */ @ Deprecated public static DownloadProperties forGalaxyDist ( final File destination , String revision ) { } }
return new DownloadProperties ( GALAXY_DIST_REPOSITORY_URL , BRANCH_STABLE , revision , destination ) ;
public class FsParserAbstract { /** * Update the job metadata * @ param jobName job name * @ param scanDate last date we scan the dirs * @ throws Exception In case of error */ private void updateFsJob ( String jobName , LocalDateTime scanDate ) throws Exception { } }
// We need to round that latest date to the lower second and // remove 2 seconds . // See # 82 : https : / / github . com / dadoonet / fscrawler / issues / 82 scanDate = scanDate . minus ( 2 , ChronoUnit . SECONDS ) ; FsJob fsJob = FsJob . builder ( ) . setName ( jobName ) . setLastrun ( scanDate ) . setIndexed ( stats . getNbDocScan ( ) ) . setDeleted ( stats . getNbDocDeleted ( ) ) . build ( ) ; fsJobFileHandler . write ( jobName , fsJob ) ;
public class CreateRobotApplicationResult { /** * The sources of the robot application . * < b > NOTE : < / b > This method appends the values to the existing list ( if any ) . Use * { @ link # setSources ( java . util . Collection ) } or { @ link # withSources ( java . util . Collection ) } if you want to override * the existing values . * @ param sources * The sources of the robot application . * @ return Returns a reference to this object so that method calls can be chained together . */ public CreateRobotApplicationResult withSources ( Source ... sources ) { } }
if ( this . sources == null ) { setSources ( new java . util . ArrayList < Source > ( sources . length ) ) ; } for ( Source ele : sources ) { this . sources . add ( ele ) ; } return this ;
public class AesKit { /** * AES解密 * @ param base64Data * @ param key * @ return * @ throws Exception */ public static String decryptData ( String base64Data , String key ) throws Exception { } }
Cipher cipher = Cipher . getInstance ( ALGORITHM_MODE_PADDING ) ; cipher . init ( Cipher . DECRYPT_MODE , new SecretKeySpec ( HashKit . md5 ( key ) . toLowerCase ( ) . getBytes ( ) , ALGORITHM ) ) ; return new String ( cipher . doFinal ( Base64Kit . decode ( base64Data ) ) ) ;
public class BottomSheetDialog { /** * Dismiss Dialog immediately without showing out animation . */ public void dismissImmediately ( ) { } }
super . dismiss ( ) ; if ( mAnimation != null ) mAnimation . cancel ( ) ; if ( mHandler != null ) mHandler . removeCallbacks ( mDismissAction ) ;
public class ObjectMapperCreator {
    /**
     * Creates the implementation of the interface denoted by interfaceClass and
     * extending {@link ObjectMapper}.
     *
     * <p>The generated class name is the interface's simple name suffixed with
     * "Impl", prefixed by all enclosing class names joined with '_'. The chosen
     * superclass depends on whether the interface is a reader, a writer, or
     * both.</p>
     *
     * @param interfaceClass the interface to generate an implementation for
     * @return the fully qualified name of the created class
     * @throws com.google.gwt.core.ext.UnableToCompleteException if any.
     */
    public String create(JClassType interfaceClass) throws UnableToCompleteException {
        // We concatenate the name of all the enclosing class.
        StringBuilder builder = new StringBuilder(interfaceClass.getSimpleSourceName() + "Impl");
        JClassType enclosingType = interfaceClass.getEnclosingType();
        while (null != enclosingType) {
            builder.insert(0, enclosingType.getSimpleSourceName() + "_");
            enclosingType = enclosingType.getEnclosingType();
        }
        String mapperClassSimpleName = builder.toString();
        String packageName = interfaceClass.getPackage().getName();
        String qualifiedMapperClassName = packageName + "." + mapperClassSimpleName;

        PrintWriter printWriter = getPrintWriter(packageName, mapperClassSimpleName);
        // The class already exists, no need to continue.
        if (printWriter == null) {
            return qualifiedMapperClassName;
        }
        try {
            // Extract the type of the object to map.
            JClassType mappedTypeClass = extractMappedType(interfaceClass);

            boolean reader = typeOracle.isObjectReader(interfaceClass);
            boolean writer = typeOracle.isObjectWriter(interfaceClass);
            // Pick the abstract superclass matching the reader/writer capabilities:
            // both -> mapper, reader only -> reader, otherwise writer.
            Class<?> abstractClass;
            if (reader) {
                if (writer) {
                    abstractClass = AbstractObjectMapper.class;
                } else {
                    abstractClass = AbstractObjectReader.class;
                }
            } else {
                abstractClass = AbstractObjectWriter.class;
            }

            // Build the implementation type: public final, implementing the
            // interface and extending the parameterized abstract class.
            TypeSpec.Builder mapperBuilder = TypeSpec.classBuilder(mapperClassSimpleName)
                    .addModifiers(Modifier.PUBLIC, Modifier.FINAL)
                    .addSuperinterface(typeName(interfaceClass))
                    .superclass(parameterizedName(abstractClass, mappedTypeClass))
                    .addMethod(buildConstructor(mappedTypeClass));
            if (reader) {
                mapperBuilder.addMethod(buildNewDeserializerMethod(mappedTypeClass));
            }
            if (writer) {
                mapperBuilder.addMethod(buildNewSerializerMethod(mappedTypeClass));
            }
            write(packageName, mapperBuilder.build(), printWriter);
        } finally {
            // Always release the writer obtained from the generator context.
            printWriter.close();
        }
        return qualifiedMapperClassName;
    }
}
public class ObjectUtil { /** * 实例化一个类 , 如果不成功 , 返回null * @ param clsName * @ return */ public static Object tryInstance ( String clsName , ClassLoader loader ) { } }
try { return instance ( clsName , loader ) ; } catch ( Exception ex ) { return null ; }
public class JibxConvertToNative {
    /**
     * Convert this tree to a DOM object.
     * Currently this is lame because I convert the tree to text, then to DOM.
     * In the future, jaxb will be able to convert directly.
     *
     * <p>Marshals the message's raw data through a shared JiBX marshalling
     * context into a JDOM document, then converts it to a W3C DOM. When the
     * binding name is absent, the package name is derived from the raw data's
     * class and stored back in the message header. On any marshalling error the
     * method falls back to {@code super.getDOM()}.</p>
     *
     * @return The dom tree.
     */
    public Node getDOM() {
        Object root = m_message.getRawData();
        try {
            // Read package/binding names from the message header.
            String packageName = (String) ((TrxMessageHeader) this.getMessage().getMessageHeader()).get(SOAPMessageTransport.JIBX_PACKAGE_NAME);
            String bindingName = (String) ((TrxMessageHeader) this.getMessage().getMessageHeader()).get(SOAPMessageTransport.JIBX_BINDING_NAME);
            if (bindingName == null) {
                // No binding supplied: derive the package from the raw data's
                // class and remember it in the header for later use.
                packageName = root.getClass().getName();
                packageName = packageName.substring(0, packageName.lastIndexOf('.'));
                ((TrxMessageHeader) this.getMessage().getMessageHeader()).put(SOAPMessageTransport.JIBX_PACKAGE_NAME, packageName);
            }
            Node node = null;
            // DocumentBuilder db = Util . getDocumentBuilder ( ) ;
            Document doc = null;
            IMarshallingContext mctx = JibxContexts.getJIBXContexts().getMarshaller(packageName, bindingName);
            if (mctx == null)
                return null;
            synchronized (mctx) {
                // Since the marshaller is shared (may want to tweek this for multi-cpu implementations)
                // ? DomElementMapper domElementMapper = new DomElementMapper ( ) ;
                // ? m . marshalDocument ( domElementMapper ) ;
                // ? domElementMapper . marshal ( root , m ) ;
                // ? IBindingFactory bfact = BindingDirectory . getFactory ( clazz ) ;
                // ? IMarshallingContext mctx = bfact . createMarshallingContext ( ) ;
                // Marshal the root object into a JDOM document, then convert
                // that JDOM tree into a W3C DOM document.
                String[] namespaces = JibxContexts.getJIBXContexts().get(packageName, bindingName).getFactory().getNamespaces();
                JDOMWriter jdomWriter = new JDOMWriter(namespaces);
                mctx.setXmlWriter(jdomWriter);
                mctx.marshalDocument(root);
                mctx.endDocument();
                org.jdom.Document jdoc = jdomWriter.getDocument();
                DOMOutputter dout = new DOMOutputter();
                doc = dout.output(jdoc);
            }
            node = doc.getDocumentElement();
            if (node != null)
                return node;
        } catch (JiBXException ex) {
            // NOTE(review): errors are only printed; the method silently falls
            // back to super.getDOM() below — confirm this is intended.
            ex.printStackTrace();
        } catch (java.lang.IllegalArgumentException ex) {
            ex.printStackTrace();
        } catch (JDOMException e) {
            e.printStackTrace();
        }
        return super.getDOM();
    }
}
public class Utility {
    /**
     * Get the domain name from this URL.
     *
     * <p>Strips the protocol, leading "//", query string, servlet name, and
     * (optionally) the port number from {@code strURL}, leaving just the domain.
     * When {@code strContextPathAtEnd} is supplied and found after the domain in
     * the original URL, the result is extended up to and including that context
     * path.</p>
     *
     * @param strURL the URL to extract the domain from
     * @param strContextPathAtEnd optional context path to keep at the end of the result
     * @param includePortNumber whether to retain the ":port" suffix
     * @return the extracted domain string
     */
    public static String getDomainFromURL(String strURL, String strContextPathAtEnd, boolean includePortNumber) {
        String strDomain = strURL;
        // A ':' within the first 8 chars is taken to be the scheme separator
        // (e.g. "http:", "https:").
        if (strDomain.indexOf(':') < 8)
            strDomain = strDomain.substring(strDomain.indexOf(':') + 1); // Get rid of protocol
        if (strDomain.indexOf("//") == 0)
            strDomain = strDomain.substring(2); // Get rid of '//'
        if (strDomain.indexOf('?') != -1)
            strDomain = strDomain.substring(0, strDomain.indexOf('?')); // Get rid of params
        // End of the domain portion = first '/', or end of string if none.
        int iEndDomain = strDomain.indexOf('/');
        if (iEndDomain == -1)
            iEndDomain = strDomain.length();
        if (strDomain.lastIndexOf(Constants.DEFAULT_SERVLET) >= iEndDomain)
            strDomain = strDomain.substring(0, strDomain.lastIndexOf(Constants.DEFAULT_SERVLET) - 1); // Strip servlet name
        if ((strDomain.indexOf(':') != -1) && (includePortNumber == false))
            strDomain = strDomain.substring(0, strDomain.indexOf(':')); // Get rid of port number
        else
            strDomain = strDomain.substring(0, iEndDomain);
        if (strContextPathAtEnd != null) {
            // Extend the result through the context path when it appears after
            // the domain in the original URL.
            int iStartIndex = strURL.indexOf(strDomain);
            int iContextIndex = strURL.indexOf(strContextPathAtEnd, iStartIndex + strDomain.length());
            if (iContextIndex != -1) { // Always
                iContextIndex = iContextIndex + strContextPathAtEnd.length();
                strDomain = strURL.substring(iStartIndex, iContextIndex);
                // NOTE(review): appending the OS file separator to a URL looks
                // suspicious ('\\' on Windows) — presumably these are file-style
                // paths here; confirm against callers.
                if (!strDomain.endsWith(System.getProperty("file.separator")))
                    strDomain = strDomain + System.getProperty("file.separator");
            }
        }
        return strDomain;
    }
}
public class Favorites { /** * Remove an item as a favorite for a user * Fires { @ link FavoriteListener # fireOnRemoveFavourite ( Item , User ) } * @ param user to remove the favorite from * @ param item to favorite * @ throws FavoriteException */ public static void removeFavorite ( @ Nonnull User user , @ Nonnull Item item ) throws FavoriteException { } }
try { if ( isFavorite ( user , item ) ) { FavoriteUserProperty fup = user . getProperty ( FavoriteUserProperty . class ) ; fup . removeFavorite ( item . getFullName ( ) ) ; FavoriteListener . fireOnRemoveFavourite ( item , user ) ; } else { throw new FavoriteException ( "Favourite is already unset for User: <" + user . getFullName ( ) + "> Item: <" + item . getFullName ( ) + ">" ) ; } } catch ( IOException e ) { throw new FavoriteException ( "Could not remove Favorite. User: <" + user . getFullName ( ) + "> Item: <" + item . getFullName ( ) + ">" , e ) ; }
public class SessionUtil {
    /**
     * Open a new session.
     *
     * <p>Validates the login input, normalizes the client-store-temporary-credential
     * session parameter, then either returns a cached temporary credential (when
     * enabled and available) or performs a fresh login via {@code newSession}.</p>
     *
     * @param loginInput login information
     * @return information get after login such as token information
     * @throws SFException if unexpected uri syntax
     * @throws SnowflakeSQLException if failed to establish connection with snowflake
     */
    static public LoginOutput openSession(LoginInput loginInput) throws SFException, SnowflakeSQLException {
        // Basic sanity checks on the login input.
        AssertUtil.assertTrue(loginInput.getServerUrl() != null, "missing server URL for opening session");
        AssertUtil.assertTrue(loginInput.getAppId() != null, "missing app id for opening session");
        AssertUtil.assertTrue(loginInput.getLoginTimeout() >= 0, "negative login timeout for opening session");

        final ClientAuthnDTO.AuthenticatorType authenticator = getAuthenticator(loginInput);
        if (!authenticator.equals(ClientAuthnDTO.AuthenticatorType.OAUTH)) {
            // OAuth does not require a username
            AssertUtil.assertTrue(loginInput.getUserName() != null, "missing user name for opening session");
        }
        if (authenticator.equals(ClientAuthnDTO.AuthenticatorType.EXTERNALBROWSER)) {
            // force to set the flag.
            loginInput.sessionParameters.put(CLIENT_STORE_TEMPORARY_CREDENTIAL, true);
        } else {
            // TODO: patch for now. We should update mergeProperteis
            // to normalize all parameters using STRING_PARAMS, INT_PARAMS and
            // BOOLEAN_PARAMS.
            Object value = loginInput.sessionParameters.get(CLIENT_STORE_TEMPORARY_CREDENTIAL);
            if (value != null) {
                // Coerce whatever raw value was supplied into a real boolean.
                loginInput.sessionParameters.put(CLIENT_STORE_TEMPORARY_CREDENTIAL, asBoolean(value));
            }
        }

        boolean isClientStoreTemporaryCredential = asBoolean(loginInput.sessionParameters.get(CLIENT_STORE_TEMPORARY_CREDENTIAL));
        LoginOutput loginOutput;
        // Prefer a locally cached temporary credential when enabled; otherwise
        // fall through to a fresh login.
        if (isClientStoreTemporaryCredential && (loginOutput = readTemporaryCredential(loginInput)) != null) {
            return loginOutput;
        }
        return newSession(loginInput);
    }
}
public class HDFSStorage { /** * Gets the full HDFS path when sealed . */ private Path getSealedFilePath ( String segmentName ) { } }
Preconditions . checkState ( segmentName != null && segmentName . length ( ) > 0 , "segmentName must be non-null and non-empty" ) ; return new Path ( String . format ( NAME_FORMAT , getPathPrefix ( segmentName ) , SEALED ) ) ;