signature
stringlengths
43
39.1k
implementation
stringlengths
0
450k
public class BuildableType_Builder { /** * Copies values from { @ code template } , skipping unset properties . * @ return this { @ code Builder } object */ public BuildableType . Builder mergeFrom ( BuildableType . Builder template ) { } }
// Upcast to access private fields ; otherwise , oddly , we get an access violation . BuildableType_Builder base = template ; BuildableType_Builder defaults = new BuildableType . Builder ( ) ; if ( ! base . _unsetProperties . contains ( Property . TYPE ) && ( defaults . _unsetProperties . contains ( Property . TYPE ) || ! Objects . equals ( template . type ( ) , defaults . type ( ) ) ) ) { type ( template . type ( ) ) ; } if ( ! base . _unsetProperties . contains ( Property . BUILDER_TYPE ) && ( defaults . _unsetProperties . contains ( Property . BUILDER_TYPE ) || ! Objects . equals ( template . builderType ( ) , defaults . builderType ( ) ) ) ) { builderType ( template . builderType ( ) ) ; } if ( ! base . _unsetProperties . contains ( Property . MERGE_BUILDER ) && ( defaults . _unsetProperties . contains ( Property . MERGE_BUILDER ) || ! Objects . equals ( template . mergeBuilder ( ) , defaults . mergeBuilder ( ) ) ) ) { mergeBuilder ( template . mergeBuilder ( ) ) ; } if ( ! base . _unsetProperties . contains ( Property . PARTIAL_TO_BUILDER ) && ( defaults . _unsetProperties . contains ( Property . PARTIAL_TO_BUILDER ) || ! Objects . equals ( template . partialToBuilder ( ) , defaults . partialToBuilder ( ) ) ) ) { partialToBuilder ( template . partialToBuilder ( ) ) ; } if ( ! base . _unsetProperties . contains ( Property . BUILDER_FACTORY ) && ( defaults . _unsetProperties . contains ( Property . BUILDER_FACTORY ) || ! Objects . equals ( template . builderFactory ( ) , defaults . builderFactory ( ) ) ) ) { builderFactory ( template . builderFactory ( ) ) ; } if ( ! base . _unsetProperties . contains ( Property . SUPPRESS_UNCHECKED ) && ( defaults . _unsetProperties . contains ( Property . SUPPRESS_UNCHECKED ) || ! Objects . equals ( template . suppressUnchecked ( ) , defaults . suppressUnchecked ( ) ) ) ) { suppressUnchecked ( template . suppressUnchecked ( ) ) ; } return ( BuildableType . Builder ) this ;
public class FlowController { /** * Resolve the given action name to a URI . This version assumes that the ActionServlet * class should be { @ link PageFlowActionServlet } . * Note : this method invokes the full action - processing cycle on a { @ link ScopedRequest } . Use * { @ link FlowController # resolveAction } to resolve the URI for an action in the current page flow . * @ deprecated Use { @ link PageFlowUtils # strutsLookup } instead . This method will be removed in v1.1. */ public static ActionResult lookup ( String actionName , ServletContext context , HttpServletRequest request , HttpServletResponse response ) throws Exception { } }
return PageFlowUtils . strutsLookup ( context , request , response , actionName , null ) ;
public class TableFormBuilder { /** * Adds a labeled separator to the form * @ param text * the key for the label . Must not be null * @ param attributes * optional attributes . See { @ link TableLayoutBuilder } for syntax details */ public JComponent addSeparator ( String text , String attributes ) { } }
JComponent separator = getComponentFactory ( ) . createLabeledSeparator ( text ) ; getLayoutBuilder ( ) . cell ( separator , attributes ) ; return separator ;
public class StringUtils { /** * Parse { @ code props } attribute specializations * @ param domains input domain * @ return list of { @ code props } attribute specializations */ public static QName [ ] [ ] getExtProps ( final String domains ) { } }
// FIXME Dont ' mix arrays and collections final List < QName [ ] > propsBuffer = new ArrayList < > ( ) ; int propsStart = domains . indexOf ( "a(" + ATTRIBUTE_NAME_PROPS ) ; int propsEnd = domains . indexOf ( ")" , propsStart ) ; while ( propsStart != - 1 && propsEnd != - 1 ) { final String propPath = domains . substring ( propsStart + 2 , propsEnd ) . trim ( ) ; final List < QName > propList = Stream . of ( propPath . split ( "\\s+" ) ) . map ( QName :: valueOf ) . collect ( Collectors . toList ( ) ) ; propsBuffer . add ( propList . toArray ( new QName [ 0 ] ) ) ; propsStart = domains . indexOf ( "a(" + ATTRIBUTE_NAME_PROPS , propsEnd ) ; propsEnd = domains . indexOf ( ")" , propsStart ) ; } return propsBuffer . toArray ( new QName [ propsBuffer . size ( ) ] [ ] ) ;
public class AmBaseBolt { /** * Use anchor function ( child message failed . notify fail to parent message . ) , MessageKey ( Use key history ' s value ) . < br > * Send message to downstream component . < br > * Use following situation . * < ol > * < li > Use this class ' s key history function . < / li > * < li > Use storm ' s fault detect function . < / li > * < / ol > * @ param message sending message * @ param messageKey MessageKey ( Use key history ' s value ) */ protected void emit ( StreamMessage message , Object messageKey ) { } }
KeyHistory newHistory = null ; if ( this . recordHistory ) { newHistory = createKeyRecorededHistory ( this . executingKeyHistory , messageKey ) ; } else { newHistory = createKeyRecorededHistory ( this . executingKeyHistory ) ; } message . getHeader ( ) . setHistory ( newHistory ) ; getCollector ( ) . emit ( this . getExecutingTuple ( ) , new Values ( "" , message ) ) ;
public class ContractJavaFileManager { /** * Returns a list of paths associated with { @ code location } , or * { @ code null } . */ @ Requires ( "location != null" ) public List < ? extends File > getLocation ( Location location ) { } }
Iterable < ? extends File > path = fileManager . getLocation ( location ) ; if ( path == null ) { return null ; } ArrayList < File > locations = new ArrayList < File > ( ) ; for ( File entry : path ) { locations . add ( entry ) ; } return locations ;
public class DataBlockEngine { /** * 写数据 */ private DataAppendResult append ( StoreTxLogPosition storeTxLogPosition , byte [ ] dataBytes ) throws IOException { } }
DataBlock writeBlock = getWriteDataBlock ( ) ; try { return writeBlock . append ( storeTxLogPosition , dataBytes ) ; } catch ( CapacityNotEnoughException e ) { if ( ! readonlyBlocks . contains ( writeBlock ) ) { readonlyBlocks . add ( writeBlock ) ; } writableBlocks . remove ( writeBlock ) ; return append ( storeTxLogPosition , dataBytes ) ; }
public class DepsAnalyzer { /** * Returns the archives for reporting that has matching dependences . * If - - require is set , they should be excluded . */ Set < Archive > archives ( ) { } }
if ( filter . requiresFilter ( ) . isEmpty ( ) ) { return archives . stream ( ) . filter ( this :: include ) . filter ( Archive :: hasDependences ) . collect ( Collectors . toSet ( ) ) ; } else { // use the archives that have dependences and not specified in - - require return archives . stream ( ) . filter ( this :: include ) . filter ( source -> ! filter . requiresFilter ( ) . contains ( source . getName ( ) ) ) . filter ( source -> source . getDependencies ( ) . map ( finder :: locationToArchive ) . anyMatch ( a -> a != source ) ) . collect ( Collectors . toSet ( ) ) ; }
public class StandardSgroupGenerator { /** * Generate the Sgroup elements for the provided atom contains . * @ param container molecule * @ return Sgroup rendering elements */ IRenderingElement generateSgroups ( IAtomContainer container , AtomSymbol [ ] symbols ) { } }
ElementGroup result = new ElementGroup ( ) ; List < Sgroup > sgroups = container . getProperty ( CDKConstants . CTAB_SGROUPS ) ; if ( sgroups == null || sgroups . isEmpty ( ) ) return result ; Map < IAtom , AtomSymbol > symbolMap = new HashMap < > ( ) ; for ( int i = 0 ; i < symbols . length ; i ++ ) { if ( symbols [ i ] != null ) symbolMap . put ( container . getAtom ( i ) , symbols [ i ] ) ; } for ( Sgroup sgroup : sgroups ) { switch ( sgroup . getType ( ) ) { case CtabAbbreviation : result . add ( generateAbbreviationSgroup ( container , sgroup ) ) ; break ; case CtabMultipleGroup : result . add ( generateMultipleSgroup ( sgroup , symbolMap ) ) ; break ; case CtabAnyPolymer : case CtabMonomer : case CtabCrossLink : case CtabCopolymer : case CtabStructureRepeatUnit : case CtabMer : case CtabGraft : case CtabModified : result . add ( generatePolymerSgroup ( sgroup , symbolMap ) ) ; break ; case CtabComponent : case CtabMixture : case CtabFormulation : result . add ( generateMixtureSgroup ( sgroup ) ) ; break ; case CtabGeneric : // not strictly a polymer but okay to draw as one result . add ( generatePolymerSgroup ( sgroup , null ) ) ; break ; } } return result ;
public class ShufflerServlet { /** * Notifies the caller that the job has completed . */ private static void enqueueCallbackTask ( final ShufflerParams shufflerParams , final String url , final String taskName ) { } }
RetryHelper . runWithRetries ( callable ( new Runnable ( ) { @ Override public void run ( ) { String hostname = ModulesServiceFactory . getModulesService ( ) . getVersionHostname ( shufflerParams . getCallbackModule ( ) , shufflerParams . getCallbackVersion ( ) ) ; Queue queue = QueueFactory . getQueue ( shufflerParams . getCallbackQueue ( ) ) ; String separater = shufflerParams . getCallbackPath ( ) . contains ( "?" ) ? "&" : "?" ; try { queue . add ( TaskOptions . Builder . withUrl ( shufflerParams . getCallbackPath ( ) + separater + url ) . method ( TaskOptions . Method . GET ) . header ( "Host" , hostname ) . taskName ( taskName ) ) ; } catch ( TaskAlreadyExistsException e ) { // harmless dup . } } } ) , RETRY_PARAMS , EXCEPTION_HANDLER ) ;
public class WindowOperator { /** * Retrieves the { @ link MergingWindowSet } for the currently active key . * The caller must ensure that the correct key is set in the state backend . * < p > The caller must also ensure to properly persist changes to state using * { @ link MergingWindowSet # persist ( ) } . */ protected MergingWindowSet < W > getMergingWindowSet ( ) throws Exception { } }
@ SuppressWarnings ( "unchecked" ) MergingWindowAssigner < ? super IN , W > mergingAssigner = ( MergingWindowAssigner < ? super IN , W > ) windowAssigner ; return new MergingWindowSet < > ( mergingAssigner , mergingSetsState ) ;
public class AbstractDataSource { /** * ( non - Javadoc ) * @ see android . database . sqlite . SQLiteOpenHelper # close ( ) */ @ Override public void close ( ) { } }
lockDb . lock ( ) ; try { if ( openCounter . decrementAndGet ( ) <= 0 ) { if ( ! this . options . inMemory ) { // Closing database if ( database != null ) { clearCompiledStatements ( ) ; database . close ( ) ; } database = null ; } if ( logEnabled ) Logger . info ( "database CLOSED (%s) (connections: %s)" , status . get ( ) , openCounter . intValue ( ) ) ; } else { if ( logEnabled ) Logger . info ( "database RELEASED (%s) (connections: %s)" , status . get ( ) , openCounter . intValue ( ) ) ; } } finally { switch ( status . get ( ) ) { case READ_AND_WRITE_OPENED : if ( database == null ) status . set ( TypeStatus . CLOSED ) ; lockReadWriteAccess . unlock ( ) ; lockDb . unlock ( ) ; break ; case READ_ONLY_OPENED : if ( database == null ) status . set ( TypeStatus . CLOSED ) ; lockReadAccess . unlock ( ) ; lockDb . unlock ( ) ; break ; case CLOSED : // do nothing lockDb . unlock ( ) ; break ; default : lockDb . unlock ( ) ; throw ( new KriptonRuntimeException ( "Inconsistent status" ) ) ; } }
public class AMI { /** * private void populateImages ( @ Nonnull ProviderContext ctx , @ Nullable String accountNumber , @ Nonnull Jiterator < MachineImage > iterator , Map < String , String > extraParameters ) throws CloudException , InternalException { * APITrace . begin ( getProvider ( ) , " populateImages " ) ; * try { * Map < String , String > parameters = getProvider ( ) . getStandardParameters ( getProvider ( ) . getContext ( ) , EC2Method . DESCRIBE _ IMAGES ) ; * EC2Method method ; * NodeList blocks ; * Document doc ; * if ( accountNumber = = null ) { * accountNumber = ctx . getAccountNumber ( ) ; * if ( getProvider ( ) . getEC2Provider ( ) . isAWS ( ) ) { * parameters . put ( " Owner " , accountNumber ) ; * getProvider ( ) . putExtraParameters ( parameters , extraParameters ) ; * method = new EC2Method ( getProvider ( ) , parameters ) ; * try { * doc = method . invoke ( ) ; * catch ( EC2Exception e ) { * logger . error ( e . getSummary ( ) ) ; * throw new CloudException ( e ) ; * blocks = doc . getElementsByTagName ( " imagesSet " ) ; * for ( int i = 0 ; i < blocks . getLength ( ) ; i + + ) { * NodeList instances = blocks . item ( i ) . getChildNodes ( ) ; * for ( int j = 0 ; j < instances . getLength ( ) ; j + + ) { * Node instance = instances . item ( j ) ; * if ( instance . getNodeName ( ) . equals ( " item " ) ) { * MachineImage image = toMachineImage ( instance ) ; * if ( image ! = null ) { * iterator . push ( image ) ; * if ( getProvider ( ) . getEC2Provider ( ) . isAWS ( ) ) { * parameters = getProvider ( ) . getStandardParameters ( getProvider ( ) . getContext ( ) , EC2Method . DESCRIBE _ IMAGES ) ; * parameters . put ( " ExecutableBy " , accountNumber ) ; * getProvider ( ) . putExtraParameters ( parameters , extraParameters ) ; * method = new EC2Method ( getProvider ( ) , parameters ) ; * try { * doc = method . invoke ( ) ; * catch ( EC2Exception e ) { * logger . error ( e . getSummary ( ) ) ; * throw new CloudException ( e ) ; * blocks = doc . 
getElementsByTagName ( " imagesSet " ) ; * for ( int i = 0 ; i < blocks . getLength ( ) ; i + + ) { * NodeList instances = blocks . item ( i ) . getChildNodes ( ) ; * for ( int j = 0 ; j < instances . getLength ( ) ; j + + ) { * Node instance = instances . item ( j ) ; * if ( instance . getNodeName ( ) . equals ( " item " ) ) { * MachineImage image = toMachineImage ( instance ) ; * if ( image ! = null ) { * iterator . push ( image ) ; * finally { * APITrace . end ( ) ; */ @ Override public @ Nonnull MachineImage registerImageBundle ( @ Nonnull ImageCreateOptions options ) throws CloudException , InternalException { } }
APITrace . begin ( getProvider ( ) , "Image.registerImageBundle" ) ; try { if ( ! MachineImageFormat . AWS . equals ( options . getBundleFormat ( ) ) ) { throw new CloudException ( "Unsupported bundle format: " + options . getBundleFormat ( ) ) ; } if ( options . getBundleLocation ( ) == null ) { throw new OperationNotSupportedException ( "A valid bundle location in object storage was not provided" ) ; } Map < String , String > parameters = getProvider ( ) . getStandardParameters ( getProvider ( ) . getContext ( ) , EC2Method . REGISTER_IMAGE ) ; NodeList blocks ; EC2Method method ; Document doc ; parameters . put ( "ImageLocation" , options . getBundleLocation ( ) ) ; method = new EC2Method ( getProvider ( ) , parameters ) ; try { doc = method . invoke ( ) ; } catch ( EC2Exception e ) { logger . error ( e . getSummary ( ) ) ; throw new CloudException ( e ) ; } blocks = doc . getElementsByTagName ( "imageId" ) ; if ( blocks . getLength ( ) > 0 ) { Node imageIdNode = blocks . item ( 0 ) ; String id = imageIdNode . getFirstChild ( ) . getNodeValue ( ) . trim ( ) ; MachineImage img = getMachineImage ( id ) ; if ( img == null ) { throw new CloudException ( "Expected to find newly registered machine image '" + id + "', but none was found" ) ; } return img ; } throw new CloudException ( "No machine image was registered, but no error was thrown" ) ; } finally { APITrace . end ( ) ; }
public class CharsetUtil { /** * 转换字符串的字符集编码 < br > * 当以错误的编码读取为字符串时 , 打印字符串将出现乱码 。 < br > * 此方法用于纠正因读取使用编码错误导致的乱码问题 。 < br > * 例如 , 在Servlet请求中客户端用GBK编码了请求参数 , 我们使用UTF - 8读取到的是乱码 , 此时 , 使用此方法即可还原原编码的内容 * < pre > * 客户端 - 》 GBK编码 - 》 Servlet容器 - 》 UTF - 8解码 - 》 乱码 * 乱码 - 》 UTF - 8编码 - 》 GBK解码 - 》 正确内容 * < / pre > * @ param source 字符串 * @ param srcCharset 源字符集 , 默认ISO - 8859-1 * @ param destCharset 目标字符集 , 默认UTF - 8 * @ return 转换后的字符集 */ public static String convert ( String source , Charset srcCharset , Charset destCharset ) { } }
if ( null == srcCharset ) { srcCharset = StandardCharsets . ISO_8859_1 ; } if ( null == destCharset ) { destCharset = StandardCharsets . UTF_8 ; } if ( StrUtil . isBlank ( source ) || srcCharset . equals ( destCharset ) ) { return source ; } return new String ( source . getBytes ( srcCharset ) , destCharset ) ;
public class SearchQuery { /** * Adds one { @ link SearchFacet } to the query . * This is an additive operation ( the given facets are added to any facet previously requested ) , * but if an existing facet has the same name it will be replaced . * This drives the inclusion of the { @ link SearchQueryResult # facets ( ) } facets } in the { @ link SearchQueryResult } . * Note that to be faceted , a field ' s value must be stored in the FTS index . * @ param facetName the name of the facet to add ( or replace if one already exists with same name ) . * @ param facet the facet to add . */ public SearchQuery addFacet ( String facetName , SearchFacet facet ) { } }
if ( facet == null || facetName == null ) { throw new NullPointerException ( "Facet name and description must not be null" ) ; } this . facets . put ( facetName , facet ) ; return this ;
public class IOState { /** * A websocket connection has been disconnected for abnormal close reasons . * This is the low level disconnect of the socket . It could be the result of a normal close operation , from an IO error , or even from a timeout . * @ param close the close information */ public void onAbnormalClose ( CloseInfo close ) { } }
if ( LOG . isDebugEnabled ( ) ) LOG . debug ( "onAbnormalClose({})" , close ) ; ConnectionState event = null ; synchronized ( this ) { if ( this . state == ConnectionState . CLOSED ) { // already closed return ; } if ( this . state == ConnectionState . OPEN ) { this . cleanClose = false ; } this . state = ConnectionState . CLOSED ; finalClose . compareAndSet ( null , close ) ; this . inputAvailable = false ; this . outputAvailable = false ; this . closeHandshakeSource = CloseHandshakeSource . ABNORMAL ; event = this . state ; } notifyStateListeners ( event ) ;
public class SymbolType { /** * Builds a symbol type from a Java type . * @ param type * type to convert * @ param arg * reference class to take into account if the type is a generic variable . * @ param updatedTypeMapping * place to put the resolved generic variables . * @ param typeMapping * reference type mapping for generic variables . * @ return the representative symbol type * @ throws InvalidTypeException when the type cannot be loaded */ public static SymbolType valueOf ( Type type , SymbolType arg , Map < String , SymbolType > updatedTypeMapping , Map < String , SymbolType > typeMapping ) throws InvalidTypeException { } }
if ( typeMapping == null ) { typeMapping = Collections . emptyMap ( ) ; } SymbolType returnType = null ; if ( type instanceof Class < ? > ) { returnType = valueOfClass ( ( Class < ? > ) type , arg , updatedTypeMapping , typeMapping ) ; } else if ( type instanceof TypeVariable ) { return valueOfTypeVariable ( ( TypeVariable < ? > ) type , arg , updatedTypeMapping , typeMapping ) ; } else if ( type instanceof ParameterizedType ) { returnType = valueOfParameterizedType ( ( ParameterizedType ) type , arg , updatedTypeMapping , typeMapping ) ; } else if ( type instanceof GenericArrayType ) { returnType = valueOfGenericArrayType ( ( GenericArrayType ) type , arg , updatedTypeMapping , typeMapping ) ; } else if ( type instanceof WildcardType ) { returnType = valueOfWildcardType ( ( WildcardType ) type , arg , updatedTypeMapping , typeMapping ) ; } return returnType ;
public class MDateField { /** * { @ inheritDoc } */ @ Override public void setValue ( final Date newDate ) { } }
if ( newDate != null ) { super . setText ( MDateDocument . getDisplayDateFormat ( ) . format ( newDate ) ) ; } else { super . setText ( null ) ; }
public class KeystoreFactory { /** * Returns an empty KeyStore object . * @ return */ @ SneakyThrows public KeyStore createEmptyKeystore ( ) { } }
KeyStore keyStore = KeyStore . getInstance ( "JKS" ) ; keyStore . load ( null , "" . toCharArray ( ) ) ; return keyStore ;
public class AbstractResultSetWrapper { /** * { @ inheritDoc } * @ see java . sql . ResultSet # updateTimestamp ( int , java . sql . Timestamp ) */ @ Override public void updateTimestamp ( final int columnIndex , final Timestamp x ) throws SQLException { } }
wrapped . updateTimestamp ( columnIndex , x ) ;
public class DictionaryUtil { /** * Read a resource file with a list of entries ( sorted by frequency ) and use * it to create a ranked dictionary . * The dictionary must contain only lower case values for the matching to work properly . * @ param fileName the name of the file * @ return the ranked dictionary ( a { @ code HashMap } which associated a * rank to each entry */ public static Map < String , Integer > loadUnrankedDictionary ( final String fileName ) { } }
Map < String , Integer > unranked = new HashMap < > ( ) ; Set < String > unranked_set = new HashSet < > ( ) ; String path = "/dictionaries/" + fileName ; try ( InputStream is = DictionaryUtil . class . getResourceAsStream ( path ) ; BufferedReader br = new BufferedReader ( new InputStreamReader ( is , "UTF-8" ) ) ) { String line ; int i = 0 ; while ( ( line = br . readLine ( ) ) != null ) { unranked_set . add ( line ) ; i ++ ; } i = i / 2 ; for ( String value : unranked_set ) { unranked . put ( value , i ) ; } } catch ( IOException e ) { System . out . println ( "Error while reading " + fileName ) ; } return unranked ;
public class DefaultComposer { /** * ( non - Javadoc ) * @ see org . jsmpp . util . PDUComposer # deliverSm ( int , java . lang . String , byte , * byte , java . lang . String , byte , byte , java . lang . String , byte , byte , * byte , byte , byte , byte [ ] , org . jsmpp . bean . OptionalParameter [ ] ) */ public byte [ ] deliverSm ( int sequenceNumber , String serviceType , byte sourceAddrTon , byte sourceAddrNpi , String sourceAddr , byte destAddrTon , byte destAddrNpi , String destinationAddr , byte esmClass , byte protocolId , byte priorityFlag , byte registeredDelivery , byte dataCoding , byte [ ] shortMessage , OptionalParameter ... optionalParameters ) throws PDUStringException { } }
StringValidator . validateString ( serviceType , StringParameter . SERVICE_TYPE ) ; StringValidator . validateString ( sourceAddr , StringParameter . SOURCE_ADDR ) ; StringValidator . validateString ( destinationAddr , StringParameter . DESTINATION_ADDR ) ; StringValidator . validateString ( shortMessage , StringParameter . SHORT_MESSAGE ) ; PDUByteBuffer buf = new PDUByteBuffer ( SMPPConstant . CID_DELIVER_SM , 0 , sequenceNumber ) ; buf . append ( serviceType ) ; buf . append ( sourceAddrTon ) ; buf . append ( sourceAddrNpi ) ; buf . append ( sourceAddr ) ; buf . append ( destAddrTon ) ; buf . append ( destAddrNpi ) ; buf . append ( destinationAddr ) ; buf . append ( esmClass ) ; buf . append ( protocolId ) ; buf . append ( priorityFlag ) ; buf . append ( ( String ) null ) ; // schedule delivery time buf . append ( ( String ) null ) ; // validity period buf . append ( registeredDelivery ) ; buf . append ( ( byte ) 0 ) ; // replace if present flag buf . append ( dataCoding ) ; buf . append ( ( byte ) 0 ) ; // sm default msg id buf . append ( ( byte ) shortMessage . length ) ; buf . append ( shortMessage ) ; buf . appendAll ( optionalParameters ) ; ; return buf . toBytes ( ) ;
public class VirtualNetworkGatewaysInner { /** * Gets all the connections in a virtual network gateway . * @ param resourceGroupName The name of the resource group . * @ param virtualNetworkGatewayName The name of the virtual network gateway . * @ param serviceCallback the async ServiceCallback to handle successful and failed responses . * @ throws IllegalArgumentException thrown if parameters fail the validation * @ return the { @ link ServiceFuture } object */ public ServiceFuture < List < VirtualNetworkGatewayConnectionListEntityInner > > listConnectionsAsync ( final String resourceGroupName , final String virtualNetworkGatewayName , final ListOperationCallback < VirtualNetworkGatewayConnectionListEntityInner > serviceCallback ) { } }
return AzureServiceFuture . fromPageResponse ( listConnectionsSinglePageAsync ( resourceGroupName , virtualNetworkGatewayName ) , new Func1 < String , Observable < ServiceResponse < Page < VirtualNetworkGatewayConnectionListEntityInner > > > > ( ) { @ Override public Observable < ServiceResponse < Page < VirtualNetworkGatewayConnectionListEntityInner > > > call ( String nextPageLink ) { return listConnectionsNextSinglePageAsync ( nextPageLink ) ; } } , serviceCallback ) ;
public class DenseMatrix { /** * Parses { @ link DenseMatrix } from the given Matrix Market . * @ param is the input stream in Matrix Market format * @ return a parsed matrix * @ exception IOException if an I / O error occurs . */ public static DenseMatrix fromMatrixMarket ( InputStream is ) throws IOException { } }
return Matrix . fromMatrixMarket ( is ) . to ( Matrices . DENSE ) ;
public class AssociateThreeByPairs { /** * Removes by swapping all elements with a ' c ' index of - 1 */ private void pruneMatches ( ) { } }
int index = 0 ; while ( index < matches . size ) { AssociatedTripleIndex a = matches . get ( index ) ; // not matched . Remove it from the list by copying that last element over it if ( a . c == - 1 ) { a . set ( matches . get ( matches . size - 1 ) ) ; matches . size -- ; } else { index ++ ; } }
public class LifecycleExecuter { /** * setup with a new configuration and also we have access to the deployment */ public void executeBeforeDeploy ( @ Observes BeforeDeploy event , TestClass testClass ) { } }
execute ( testClass . getMethods ( org . jboss . arquillian . container . mobicents . api . annotations . BeforeDeploy . class ) ) ;
public class StatementDML { /** * other fk references this : if constraint trigger action : other write lock */ @ Override void getTableNamesForWrite ( OrderedHashSet set ) { } }
/* A VoltDB Extension . * Base table could be null for views . */ if ( baseTable == null || baseTable . isTemp ( ) ) { return ; } set . add ( baseTable . getName ( ) ) ; for ( int i = 0 ; i < baseTable . fkPath . length ; i ++ ) { set . add ( baseTable . fkPath [ i ] . getMain ( ) . getName ( ) ) ; } getTriggerTableNames ( set , true ) ;
public class CollectionUtil { /** * Adds objects in array to the given collection * @ return the same collection which is passed as argument */ @ SuppressWarnings ( { } }
"unchecked" , "ManualArrayToCollectionCopy" } ) public static < E , T extends E > Collection < E > addAll ( Collection < E > c , T ... array ) { for ( T obj : array ) c . add ( obj ) ; return c ;
public class AJavaServiceImplementation { /** * Utility method to extract the value of @ ResponseTo for a method supposedly annotated . */ private static String getResponseToFrom ( String callerClassName , String callerMethodName ) throws ServiceExecutionException { } }
try { Class < ? > clas = Class . forName ( callerClassName ) ; Method [ ] methods = clas . getMethods ( ) ; for ( Method method : methods ) { if ( method . getName ( ) . equalsIgnoreCase ( callerMethodName ) ) { ResponseTo annotation = method . getAnnotation ( ResponseTo . class ) ; return ( null != annotation ) ? annotation . value ( ) : null ; } } } catch ( ClassNotFoundException ex ) { throw new ServiceExecutionException ( callerClassName , ex ) ; } return null ;
public class SrvLedger { /** * < p > Retrieve previous totals . < / p > * @ param pAddParam additional param * @ param pAccount account * @ param pDate1 date start * @ param pSubaccId Subaccount ID or null * @ return LedgerPrevious data * @ throws Exception - an exception */ @ Override public final LedgerPrevious retrievePrevious ( final Map < String , Object > pAddParam , final Account pAccount , final Date pDate1 , final String pSubaccId ) throws Exception { } }
getSrvBalance ( ) . recalculateAllIfNeed ( pAddParam , pDate1 ) ; LedgerPrevious result = new LedgerPrevious ( ) ; if ( this . queryPrevious == null ) { String flName = "/" + "accounting" + "/" + "ledger" // + " / " + " queryPrevious . sql " ; fast query cause error due changing subacc name + "/" + "queryPreviousSl.sql" ; this . queryPrevious = loadString ( flName ) ; } String query = queryPrevious . replace ( ":DATEBALANCE" , String . valueOf ( getSrvBalance ( ) . evalDatePeriodStartFor ( pAddParam , pDate1 ) . getTime ( ) ) ) ; query = query . replace ( ":DATE1" , String . valueOf ( pDate1 . getTime ( ) ) ) ; query = query . replace ( ":ACCID" , "'" + pAccount . getItsId ( ) + "'" ) ; String whereSubaccDebit = "" ; String whereSubaccCredit = "" ; String whereSubacc = "" ; if ( pSubaccId != null && pSubaccId . length ( ) > 0 ) { whereSubaccDebit = " and SUBACCDEBITID='" + pSubaccId + "'" ; whereSubaccCredit = " and SUBACCCREDITID='" + pSubaccId + "'" ; whereSubacc = " and SUBACCOUNT='" + pSubaccId + "'" ; } query = query . replace ( ":SUBACCDEBIT" , whereSubaccDebit ) ; query = query . replace ( ":SUBACCCREDIT" , whereSubaccCredit ) ; query = query . replace ( ":SUBACC" , whereSubacc ) ; IRecordSet < RS > recordSet = null ; try { recordSet = getSrvDatabase ( ) . retrieveRecords ( query ) ; if ( recordSet . moveToFirst ( ) ) { do { LedgerPreviousLine lpl = new LedgerPreviousLine ( ) ; String subaccName = recordSet . getString ( "SUBACC" ) ; lpl . setDebit ( BigDecimal . valueOf ( recordSet . getDouble ( "DEBIT" ) ) . setScale ( getSrvAccSettings ( ) . lazyGetAccSettings ( pAddParam ) . getCostPrecision ( ) , getSrvAccSettings ( ) . lazyGetAccSettings ( pAddParam ) . getRoundingMode ( ) ) ) ; lpl . setCredit ( BigDecimal . valueOf ( recordSet . getDouble ( "CREDIT" ) ) . setScale ( getSrvAccSettings ( ) . lazyGetAccSettings ( pAddParam ) . getCostPrecision ( ) , getSrvAccSettings ( ) . lazyGetAccSettings ( pAddParam ) . getRoundingMode ( ) ) ) ; if ( pAccount . 
getNormalBalanceType ( ) == ENormalBalanceType . DEBIT ) { lpl . setBalance ( lpl . getDebit ( ) . subtract ( lpl . getCredit ( ) ) ) ; } else { lpl . setBalance ( lpl . getCredit ( ) . subtract ( lpl . getDebit ( ) ) ) ; } result . getLinesMap ( ) . put ( subaccName , lpl ) ; result . setDebitAcc ( result . getDebitAcc ( ) . add ( lpl . getDebit ( ) ) ) ; result . setCreditAcc ( result . getCreditAcc ( ) . add ( lpl . getCredit ( ) ) ) ; } while ( recordSet . moveToNext ( ) ) ; } } finally { if ( recordSet != null ) { recordSet . close ( ) ; } } if ( pAccount . getNormalBalanceType ( ) == ENormalBalanceType . DEBIT ) { result . setBalanceAcc ( result . getDebitAcc ( ) . subtract ( result . getCreditAcc ( ) ) ) ; } else { result . setBalanceAcc ( result . getCreditAcc ( ) . subtract ( result . getDebitAcc ( ) ) ) ; } return result ;
public class HadoopRandomIndexingMain { /** * Executes the { @ link HadoopRandomIndexing } algorithm , processing all of * the provided input directories and writing the resulting { @ link * SemanticSpace } to the writer . */ protected void execute ( Collection < String > inputDirs , SemanticSpaceWriter writer ) throws Exception { } }
HadoopRandomIndexing hri = new HadoopRandomIndexing ( ) ; // Load the index vectors if the user has specified any if ( argOptions . hasOption ( "loadVectors" ) ) { String fileName = argOptions . getStringOption ( "loadVectors" ) ; LOGGER . info ( "loading index vectors from " + fileName ) ; Map < String , TernaryVector > wordToIndexVector = IndexVectorUtil . load ( new File ( fileName ) ) ; hri . setWordToIndexVector ( wordToIndexVector ) ; } hri . execute ( inputDirs , writer ) ;
public class UndeployCommand { /** * Sends the undeploy command to a Cadmium - Deployer war . * @ param url The uri to a Cadmium - Deployer war . * @ param warName The war to undeploy . * @ param token The Github API token used for authentication . * @ throws Exception */ public static void undeploy ( String url , String warName , String token ) throws Exception { } }
HttpClient client = httpClient ( ) ; HttpPost del = new HttpPost ( url + "/system/undeploy" ) ; addAuthHeader ( token , del ) ; del . addHeader ( "Content-Type" , MediaType . APPLICATION_JSON ) ; UndeployRequest req = new UndeployRequest ( ) ; req . setWarName ( warName ) ; del . setEntity ( new StringEntity ( new Gson ( ) . toJson ( req ) , "UTF-8" ) ) ; HttpResponse resp = client . execute ( del ) ; if ( resp . getStatusLine ( ) . getStatusCode ( ) == HttpStatus . SC_OK ) { String respStr = EntityUtils . toString ( resp . getEntity ( ) ) ; if ( ! respStr . equals ( "ok" ) ) { throw new Exception ( "Failed to undeploy " + warName ) ; } else { System . out . println ( "Undeployment of " + warName + " successful" ) ; } } else { System . err . println ( "Failed to undeploy " + warName ) ; System . err . println ( resp . getStatusLine ( ) . getStatusCode ( ) + ": " + EntityUtils . toString ( resp . getEntity ( ) ) ) ; }
public class Calendar { /** * Both firstDayOfWeek and minimalDaysInFirstWeek are locale - dependent . * They are used to figure out the week count for a specific date for * a given locale . These must be set when a Calendar is constructed . * @ param desiredLocale the given locale . */ private void setWeekCountData ( Locale desiredLocale ) { } }
/* try to get the Locale data from the cache */ int [ ] data = cachedLocaleData . get ( desiredLocale ) ; if ( data == null ) { /* cache miss */ data = new int [ 2 ] ; // BEGIN Android - changed : Use ICU4C to get week data . // data [ 0 ] = CalendarDataUtility . retrieveFirstDayOfWeek ( desiredLocale ) ; // data [ 1 ] = CalendarDataUtility . retrieveMinimalDaysInFirstWeek ( desiredLocale ) ; LocaleData localeData = LocaleData . get ( desiredLocale ) ; data [ 0 ] = localeData . firstDayOfWeek . intValue ( ) ; data [ 1 ] = localeData . minimalDaysInFirstWeek . intValue ( ) ; // END Android - changed : Use ICU4C to get week data . cachedLocaleData . putIfAbsent ( desiredLocale , data ) ; } firstDayOfWeek = data [ 0 ] ; minimalDaysInFirstWeek = data [ 1 ] ;
public class CountedCompleter { /** * If this task ' s pending count is zero , returns this task ; otherwise decrements its pending count * and returns { @ code * null } . This method is designed to be used with { @ link # nextComplete } in completion traversal * loops . * @ return this task , if pending count was zero , else { @ code null } */ public final CountedCompleter < ? > firstComplete ( ) { } }
for ( int c ; ; ) { if ( ( c = pending ) == 0 ) return this ; else if ( U . compareAndSwapInt ( this , PENDING , c , c - 1 ) ) return null ; }
public class CompHandshakeFactory { /** * Create the singleton ComponentHandshake instance . * @ exception Exception The method rethrows any Exception caught during * creaton of the singleton object . */ private static void createHandshakeInstance ( ) throws Exception { } }
if ( TraceComponent . isAnyTracingEnabled ( ) && tc . isEntryEnabled ( ) ) SibTr . entry ( tc , "createHandshakeInstance" ) ; try { instance = Class . forName ( MfpConstants . COMP_HANDSHAKE_CLASS ) . newInstance ( ) ; } catch ( Exception e ) { FFDCFilter . processException ( e , "com.ibm.ws.sib.mfp.CompHandshakeFactory.createHandshakeInstance" , "88" ) ; SibTr . error ( tc , "UNABLE_TO_CREATE_COMPHANDSHAKE_CWSIF0051" , e ) ; throw e ; } if ( TraceComponent . isAnyTracingEnabled ( ) && tc . isEntryEnabled ( ) ) SibTr . exit ( tc , "createHandshakeInstance" ) ;
public class ApiOvhDomain { /** * List all the rules for a specific cartId / itemId * REST : GET / domain / rules * @ param cartId [ required ] Cart ID concerned for the rules * @ param itemId [ required ] Item ID concerned for the rules * API beta */ public OvhRule rules_GET ( String cartId , Long itemId ) throws IOException { } }
String qPath = "/domain/rules" ; StringBuilder sb = path ( qPath ) ; query ( sb , "cartId" , cartId ) ; query ( sb , "itemId" , itemId ) ; String resp = execN ( qPath , "GET" , sb . toString ( ) , null ) ; return convertTo ( resp , OvhRule . class ) ;
public class TypeDesc { /** * Acquire a TypeDesc from a type descriptor . */ public static TypeDesc forDescriptor ( final String desc ) throws IllegalArgumentException { } }
// Parses a JVM type descriptor (e.g. "I", "[Ljava/lang/String;") into an
// interned TypeDesc. Results are cached in cDescriptorsToInstances.
// Fast path: return the cached instance when this descriptor was seen before.
TypeDesc type = cDescriptorsToInstances.get(desc);
if (type != null) {
    return type;
}
// TODO : Support generics in descriptor .
String rootDesc = desc;
int cursor = 0;
int dim = 0;
try {
    char c;
    // Count leading '[' characters: that is the array dimension.
    while ((c = rootDesc.charAt(cursor++)) == '[') {
        dim++;
    }
    switch (c) {
    case 'V':
        type = VOID;
        break;
    case 'Z':
        type = BOOLEAN;
        break;
    case 'C':
        type = CHAR;
        break;
    case 'B':
        type = BYTE;
        break;
    case 'S':
        type = SHORT;
        break;
    case 'I':
        type = INT;
        break;
    case 'J':
        type = LONG;
        break;
    case 'F':
        type = FLOAT;
        break;
    case 'D':
        type = DOUBLE;
        break;
    case 'L':
        // Object type. Strip the array prefix so rootDesc is the plain
        // "Lname;" form, then read the class name up to ';', converting
        // '/' package separators into '.'.
        if (dim > 0) {
            rootDesc = rootDesc.substring(dim);
            cursor = 1;
        }
        StringBuffer name = new StringBuffer(rootDesc.length() - 2);
        while ((c = rootDesc.charAt(cursor++)) != ';') {
            if (c == '/') {
                c = '.';
            }
            name.append(c);
        }
        type = intern(new ObjectType(rootDesc, name.toString()));
        break;
    default:
        throw invalidDescriptor(desc);
    }
} catch (NullPointerException e) {
    // Null descriptor: treated as invalid rather than propagating the NPE.
    throw invalidDescriptor(desc);
} catch (IndexOutOfBoundsException e) {
    // Truncated descriptor (ran off the end while scanning).
    throw invalidDescriptor(desc);
}
// Trailing characters after the parsed type make the descriptor invalid.
if (cursor != rootDesc.length()) {
    throw invalidDescriptor(desc);
}
// Wrap in array types, one level per leading '['.
while (--dim >= 0) {
    type = type.toArrayType();
}
cDescriptorsToInstances.put(desc, type);
return type;
public class CmsImportVersion2 { /** * Cleans up member variables after the import is finished . < p > * This is required since there is only one instance for * each import version that is kept in memory and reused . < p > */ @ Override protected void cleanUp ( ) { } }
m_pageStorage = null ; m_folderStorage = null ; m_webAppNames = null ; m_webappUrl = null ; super . cleanUp ( ) ;
public class MaterialAPI { /** * 删除一个永久素材 * @ param mediaId 素材ID * @ return 删除结果 */ public ResultType deleteMaterial ( String mediaId ) { } }
String url = BASE_API_URL + "cgi-bin/material/del_material?access_token=#" ; final Map < String , String > param = new HashMap < String , String > ( ) ; param . put ( "media_id" , mediaId ) ; BaseResponse response = executePost ( url , JSONUtil . toJson ( param ) ) ; return ResultType . get ( response . getErrcode ( ) ) ;
public class XmpSchema { /** * @ see java . util . Properties # setProperty ( java . lang . String , java . lang . String ) * @ param key * @ param value * @ return the previous property ( null if there wasn ' t one ) */ public Object setProperty ( String key , LangAlt value ) { } }
return super . setProperty ( key , value . toString ( ) ) ;
public class Transition { /** * concat an array history into a single INDArry of as many channel * as element in the history array * @ param history the history to concat * @ return the multi - channel INDArray */ public static INDArray concat ( INDArray [ ] history ) { } }
INDArray arr = Nd4j . concat ( 0 , history ) ; return arr ;
public class ResolveSource { /** * object with nothing in it instead . */ private AbstractConfigObject rootMustBeObj ( Container value ) { } }
if ( value instanceof AbstractConfigObject ) { return ( AbstractConfigObject ) value ; } else { return SimpleConfigObject . empty ( ) ; }
public class VectorPointer {

    /**
     * USED IN DROP.
     *
     * Copies a 32-slot node, shifting its contents from offset
     * {@code oldLeft} in the source to offset {@code newLeft} in the copy.
     *
     * @param array   the 32-slot source node
     * @param oldLeft offset of the first live slot in the source
     * @param newLeft offset at which the live slots start in the copy
     * @return a fresh 32-slot array holding the shifted contents
     */
    public Object[] copyRange(Object[] array, int oldLeft, int newLeft) {
        Object[] copy = new Object[32];
        // Only the slots that fit within both windows are transferred.
        int count = 32 - Math.max(newLeft, oldLeft);
        System.arraycopy(array, oldLeft, copy, newLeft, count);
        return copy;
    }
}
public class SubscriptionSchedule { /** * Retrieves the list of subscription schedule revisions for a subscription schedule . */ public SubscriptionScheduleRevisionCollection revisions ( Map < String , Object > params , RequestOptions options ) throws StripeException { } }
String url = String . format ( "%s%s" , Stripe . getApiBase ( ) , String . format ( "/v1/subscription_schedules/%s/revisions" , ApiResource . urlEncodeId ( this . getId ( ) ) ) ) ; return requestCollection ( url , params , SubscriptionScheduleRevisionCollection . class , options ) ;
public class NameRegistryClient { /** * Unregisters an identifier . * @ param id an identifier */ @ Override public void unregister ( final Identifier id ) throws IOException { } }
final Link < NamingMessage > link = transport . open ( serverSocketAddr , codec , new LoggingLinkListener < NamingMessage > ( ) ) ; link . write ( new NamingUnregisterRequest ( id ) ) ;
public class SimpleCassandraDao { /** * Insert a new value keyed by key * @ param key Key for the value * @ param value the String value to insert */ public void insert ( final String key , final String columnName , final String value ) { } }
createMutator ( keyspace , serializer ) . insert ( key , columnFamilyName , createColumn ( columnName , value , serializer , serializer ) ) ;
public class BruteForceUtil {

    /**
     * Calculates the brute force cardinality of a given character: the
     * estimated size of the character class a brute-force attack would have
     * to search to find that character.
     *
     * @param character the password character being classified
     * @return the brute force cardinality
     */
    public static int getBrutForceCardinality(final char character) {
        if (0x30 <= character && character <= 0x39) {
            return 10;   // ASCII digit
        }
        if (0x41 <= character && character <= 0x5a) {
            return 26;   // ASCII upper case letter
        }
        if (0x61 <= character && character <= 0x7a) {
            return 26;   // ASCII lower case letter
        }
        if (character <= 0x7f) {
            return 33;   // remaining ASCII: symbols (incl. control chars)
        }
        return 100;      // anything beyond ASCII counts as unicode
    }
}
public class StringIterate { /** * Count the number of occurrences of the specified char . * @ since 7.0 */ public static int occurrencesOfChar ( String string , final char value ) { } }
return StringIterate . countChar ( string , new CharPredicate ( ) { public boolean accept ( char character ) { return value == character ; } } ) ;
public class DescribeLoadBalancerAttributesResult { /** * Information about the load balancer attributes . * < b > NOTE : < / b > This method appends the values to the existing list ( if any ) . Use * { @ link # setAttributes ( java . util . Collection ) } or { @ link # withAttributes ( java . util . Collection ) } if you want to * override the existing values . * @ param attributes * Information about the load balancer attributes . * @ return Returns a reference to this object so that method calls can be chained together . */ public DescribeLoadBalancerAttributesResult withAttributes ( LoadBalancerAttribute ... attributes ) { } }
if ( this . attributes == null ) { setAttributes ( new java . util . ArrayList < LoadBalancerAttribute > ( attributes . length ) ) ; } for ( LoadBalancerAttribute ele : attributes ) { this . attributes . add ( ele ) ; } return this ;
public class PagesSpatialIndexFactory { /** * Called by { @ link SpatialIndexBuilderOperator } to provide a * { @ link Supplier } of spatial indexes for { @ link SpatialJoinOperator } s to use . * Returns a Future that completes once all the { @ link SpatialJoinOperator } s have completed . */ public ListenableFuture < ? > lendPagesSpatialIndex ( Supplier < PagesSpatialIndex > pagesSpatialIndex ) { } }
requireNonNull(pagesSpatialIndex, "pagesSpatialIndex is null");
// All probe operators have already finished: nothing will ever consume the
// index, so hand back the completed future immediately.
if (activeProbeOperators.getFreeFuture().isDone()) {
    return NOT_BLOCKED;
}
List<SettableFuture<PagesSpatialIndex>> settableFutures;
synchronized (this) {
    // The index supplier may be installed exactly once.
    verify(this.pagesSpatialIndex == null);
    this.pagesSpatialIndex = pagesSpatialIndex;
    // Snapshot and clear the pending requests while holding the lock.
    settableFutures = ImmutableList.copyOf(pagesSpatialIndexFutures);
    pagesSpatialIndexFutures.clear();
}
// Satisfy the waiters outside the lock so their listeners do not run while
// this monitor is held.
for (SettableFuture<PagesSpatialIndex> settableFuture : settableFutures) {
    settableFuture.set(pagesSpatialIndex.get());
}
return activeProbeOperators.getFreeFuture();
public class PeepholeMinimizeConditions { /** * Try turning IF nodes into smaller HOOKs * Returns the replacement for n or the original if no replacement was * necessary . */ private Node tryMinimizeIf ( Node n ) { } }
// NOTE(review): this routine rewrites the IF node n in place into the
// smallest equivalent form it can find: `x || f()`, `x && f()`, a HOOK
// (`cond ? a : b`), a hoisted `return cond ? a : b`, or a var/assignment
// folded over a HOOK. The statement order below is significant (nodes are
// detached and re-attached as the tree is rewritten), so the code is left
// byte-for-byte unchanged; only this commentary was added.
Node parent = n . getParent ( ) ; Node originalCond = n . getFirstChild ( ) ; /* If the condition is a literal , we ' ll let other * optimizations try to remove useless code . */ if ( NodeUtil . isLiteralValue ( originalCond , true ) ) { return n ; } Node thenBranch = originalCond . getNext ( ) ; Node elseBranch = thenBranch . getNext ( ) ; MinimizedCondition minCond = MinimizedCondition . fromConditionNode ( originalCond ) ; // Compute two minimized representations . The first representation counts // a leading NOT node , and the second ignores a leading NOT node . // If we can fold the if statement into a HOOK or boolean operation , // then the NOT node does not matter , and we prefer the second condition . // If we cannot fold the if statement , then we prefer the first condition . MeasuredNode unnegatedCond = minCond . getMinimized ( MinimizationStyle . PREFER_UNNEGATED ) ; MeasuredNode shortCond = minCond . getMinimized ( MinimizationStyle . ALLOW_LEADING_NOT ) ; if ( elseBranch == null ) { if ( isFoldableExpressBlock ( thenBranch ) ) { Node expr = getBlockExpression ( thenBranch ) ; if ( ! late && isPropertyAssignmentInExpression ( expr ) ) { // Keep opportunities for CollapseProperties such as // a . longIdentifier | | a . longIdentifier = . . . - > var a = . . . ; // until CollapseProperties has been run . replaceNode ( originalCond , unnegatedCond ) ; return n ; } if ( shortCond . isNot ( ) ) { // if ( ! x ) bar ( ) ; - > x | | bar ( ) ; Node replacementCond = replaceNode ( originalCond , shortCond . withoutNot ( ) ) . detach ( ) ; Node or = IR . or ( replacementCond , expr . removeFirstChild ( ) ) . srcref ( n ) ; Node newExpr = NodeUtil . newExpr ( or ) ; parent . replaceChild ( n , newExpr ) ; reportChangeToEnclosingScope ( parent ) ; return newExpr ; } // True , but removed for performance reasons . // Preconditions . checkState ( shortCond . isEquivalentTo ( unnegatedCond ) ) ; // if ( x ) foo ( ) ; - > x & & foo ( ) ; if ( shortCond . 
isLowerPrecedenceThan ( AND_PRECEDENCE ) && isLowerPrecedence ( expr . getFirstChild ( ) , AND_PRECEDENCE ) ) { // One additional set of parentheses is worth the change even if // there is no immediate code size win . However , two extra pair of // { } , we would have to think twice . ( unless we know for sure the // we can further optimize its parent . replaceNode ( originalCond , shortCond ) ; return n ; } Node replacementCond = replaceNode ( originalCond , shortCond ) . detach ( ) ; Node and = IR . and ( replacementCond , expr . removeFirstChild ( ) ) . srcref ( n ) ; Node newExpr = NodeUtil . newExpr ( and ) ; parent . replaceChild ( n , newExpr ) ; reportChangeToEnclosingScope ( parent ) ; return newExpr ; } else { // Try to combine two IF - ELSE if ( NodeUtil . isStatementBlock ( thenBranch ) && thenBranch . hasOneChild ( ) ) { Node innerIf = thenBranch . getFirstChild ( ) ; if ( innerIf . isIf ( ) ) { Node innerCond = innerIf . getFirstChild ( ) ; Node innerThenBranch = innerCond . getNext ( ) ; Node innerElseBranch = innerThenBranch . getNext ( ) ; if ( innerElseBranch == null && ! ( unnegatedCond . isLowerPrecedenceThan ( AND_PRECEDENCE ) && isLowerPrecedence ( innerCond , AND_PRECEDENCE ) ) ) { Node replacementCond = replaceNode ( originalCond , unnegatedCond ) . detach ( ) ; n . detachChildren ( ) ; n . addChildToBack ( IR . and ( replacementCond , innerCond . detach ( ) ) . srcref ( originalCond ) ) ; n . addChildToBack ( innerThenBranch . detach ( ) ) ; reportChangeToEnclosingScope ( n ) ; // Not worth trying to fold the current IF - ELSE into & & because // the inner IF - ELSE wasn ' t able to be folded into & & anyways . return n ; } } } } replaceNode ( originalCond , unnegatedCond ) ; return n ; } /* TODO ( dcc ) This modifies the siblings of n , which is undesirable for a * peephole optimization . This should probably get moved to another pass . */ tryRemoveRepeatedStatements ( n ) ; // if ( ! 
x ) foo ( ) ; else bar ( ) ; - > if ( x ) bar ( ) ; else foo ( ) ; // An additional set of curly braces isn ' t worth it . if ( shortCond . isNot ( ) && ! consumesDanglingElse ( elseBranch ) ) { replaceNode ( originalCond , shortCond . withoutNot ( ) ) ; n . removeChild ( thenBranch ) ; n . addChildToBack ( thenBranch ) ; reportChangeToEnclosingScope ( n ) ; return n ; } // if ( x ) return 1 ; else return 2 ; - > return x ? 1:2; if ( isReturnExpressBlock ( thenBranch ) && isReturnExpressBlock ( elseBranch ) ) { Node thenExpr = getBlockReturnExpression ( thenBranch ) ; Node elseExpr = getBlockReturnExpression ( elseBranch ) ; Node replacementCond = replaceNode ( originalCond , shortCond ) . detach ( ) ; thenExpr . detach ( ) ; elseExpr . detach ( ) ; // note - we ignore any cases with " return ; " , technically this // can be converted to " return undefined ; " or some variant , but // that does not help code size . Node returnNode = IR . returnNode ( IR . hook ( replacementCond , thenExpr , elseExpr ) . srcref ( n ) ) ; parent . replaceChild ( n , returnNode ) ; reportChangeToEnclosingScope ( returnNode ) ; return returnNode ; } boolean thenBranchIsExpressionBlock = isFoldableExpressBlock ( thenBranch ) ; boolean elseBranchIsExpressionBlock = isFoldableExpressBlock ( elseBranch ) ; if ( thenBranchIsExpressionBlock && elseBranchIsExpressionBlock ) { Node thenOp = getBlockExpression ( thenBranch ) . getFirstChild ( ) ; Node elseOp = getBlockExpression ( elseBranch ) . getFirstChild ( ) ; if ( thenOp . getToken ( ) == elseOp . getToken ( ) ) { // if ( x ) a = 1 ; else a = 2 ; - > a = x ? 1:2; if ( NodeUtil . isAssignmentOp ( thenOp ) ) { Node lhs = thenOp . getFirstChild ( ) ; if ( areNodesEqualForInlining ( lhs , elseOp . getFirstChild ( ) ) // if LHS has side effects , don ' t proceed [ since the optimization // evaluates LHS before cond ] // NOTE - there are some circumstances where we can // proceed even if there are side effects . . . && ! 
mayEffectMutableState ( lhs ) && ( ! mayHaveSideEffects ( originalCond ) || ( thenOp . isAssign ( ) && thenOp . getFirstChild ( ) . isName ( ) ) ) ) { Node replacementCond = replaceNode ( originalCond , shortCond ) . detach ( ) ; Node assignName = thenOp . removeFirstChild ( ) ; Node thenExpr = thenOp . removeFirstChild ( ) ; Node elseExpr = elseOp . getLastChild ( ) ; elseOp . removeChild ( elseExpr ) ; Node hookNode = IR . hook ( replacementCond , thenExpr , elseExpr ) . srcref ( n ) ; Node assign = new Node ( thenOp . getToken ( ) , assignName , hookNode ) . srcref ( thenOp ) ; Node expr = NodeUtil . newExpr ( assign ) ; parent . replaceChild ( n , expr ) ; reportChangeToEnclosingScope ( parent ) ; return expr ; } } } // if ( x ) foo ( ) ; else bar ( ) ; - > x ? foo ( ) : bar ( ) Node replacementCond = replaceNode ( originalCond , shortCond ) . detach ( ) ; thenOp . detach ( ) ; elseOp . detach ( ) ; Node expr = IR . exprResult ( IR . hook ( replacementCond , thenOp , elseOp ) . srcref ( n ) ) ; parent . replaceChild ( n , expr ) ; reportChangeToEnclosingScope ( parent ) ; return expr ; } boolean thenBranchIsVar = isVarBlock ( thenBranch ) ; boolean elseBranchIsVar = isVarBlock ( elseBranch ) ; // if ( x ) var y = 1 ; else y = 2 - > var y = x ? 1:2 if ( thenBranchIsVar && elseBranchIsExpressionBlock && getBlockExpression ( elseBranch ) . getFirstChild ( ) . isAssign ( ) ) { Node var = getBlockVar ( thenBranch ) ; Node elseAssign = getBlockExpression ( elseBranch ) . getFirstChild ( ) ; Node name1 = var . getFirstChild ( ) ; Node maybeName2 = elseAssign . getFirstChild ( ) ; if ( name1 . hasChildren ( ) && maybeName2 . isName ( ) && name1 . getString ( ) . equals ( maybeName2 . getString ( ) ) ) { checkState ( name1 . hasOneChild ( ) ) ; Node thenExpr = name1 . removeFirstChild ( ) ; Node elseExpr = elseAssign . getLastChild ( ) . detach ( ) ; Node replacementCond = replaceNode ( originalCond , shortCond ) . detach ( ) ; Node hookNode = IR . 
hook ( replacementCond , thenExpr , elseExpr ) . srcref ( n ) ; var . detach ( ) ; name1 . addChildToBack ( hookNode ) ; parent . replaceChild ( n , var ) ; reportChangeToEnclosingScope ( parent ) ; return var ; } // if ( x ) y = 1 ; else var y = 2 - > var y = x ? 1:2 } else if ( elseBranchIsVar && thenBranchIsExpressionBlock && getBlockExpression ( thenBranch ) . getFirstChild ( ) . isAssign ( ) ) { Node var = getBlockVar ( elseBranch ) ; Node thenAssign = getBlockExpression ( thenBranch ) . getFirstChild ( ) ; Node maybeName1 = thenAssign . getFirstChild ( ) ; Node name2 = var . getFirstChild ( ) ; if ( name2 . hasChildren ( ) && maybeName1 . isName ( ) && maybeName1 . getString ( ) . equals ( name2 . getString ( ) ) ) { Node thenExpr = thenAssign . getLastChild ( ) . detach ( ) ; checkState ( name2 . hasOneChild ( ) ) ; Node elseExpr = name2 . removeFirstChild ( ) ; Node replacementCond = replaceNode ( originalCond , shortCond ) . detach ( ) ; Node hookNode = IR . hook ( replacementCond , thenExpr , elseExpr ) . srcref ( n ) ; var . detach ( ) ; name2 . addChildToBack ( hookNode ) ; parent . replaceChild ( n , var ) ; reportChangeToEnclosingScope ( parent ) ; return var ; } } replaceNode ( originalCond , unnegatedCond ) ; return n ;
public class LocalTransactionManager { /** * { @ inheritDoc } * @ see jp . co . future . uroborosql . tx . TransactionManager # setRollbackOnly ( ) */ @ Override public void setRollbackOnly ( ) { } }
Optional < LocalTransactionContext > txContext = currentTxContext ( ) ; if ( txContext . isPresent ( ) ) { txContext . get ( ) . setRollbackOnly ( ) ; } else { this . unmanagedTransaction . ifPresent ( LocalTransactionContext :: setRollbackOnly ) ; }
public class ListSubscribedRuleGroupsRequestMarshaller { /** * Marshall the given parameter object . */ public void marshall ( ListSubscribedRuleGroupsRequest listSubscribedRuleGroupsRequest , ProtocolMarshaller protocolMarshaller ) { } }
if ( listSubscribedRuleGroupsRequest == null ) { throw new SdkClientException ( "Invalid argument passed to marshall(...)" ) ; } try { protocolMarshaller . marshall ( listSubscribedRuleGroupsRequest . getNextMarker ( ) , NEXTMARKER_BINDING ) ; protocolMarshaller . marshall ( listSubscribedRuleGroupsRequest . getLimit ( ) , LIMIT_BINDING ) ; } catch ( Exception e ) { throw new SdkClientException ( "Unable to marshall request to JSON: " + e . getMessage ( ) , e ) ; }
public class IntList { /** * Returns whether no elements of this List match the provided predicate . * @ param filter * @ return */ public < E extends Exception > boolean noneMatch ( Try . IntPredicate < E > filter ) throws E { } }
return noneMatch ( 0 , size ( ) , filter ) ;
public class SimpleRadioButtonControl { /** * Sets up bindings for all radio buttons . */ private void setupRadioButtonEventHandlers ( ) { } }
for ( int i = 0 ; i < radioButtons . size ( ) ; i ++ ) { final int j = i ; radioButtons . get ( j ) . setOnAction ( event -> field . select ( j ) ) ; }
public class Iced { /** * Java serializers use H2Os Icing */ @ Override public void readExternal ( ObjectInput ois ) throws IOException , ClassNotFoundException { } }
int x = ois . readInt ( ) ; byte [ ] buf = MemoryManager . malloc1 ( x ) ; ois . readFully ( buf ) ; read ( new AutoBuffer ( buf ) ) ;
public class Context { /** * Set the current context */ private void setCurrent ( Map < String , Object > current ) { } }
if ( current instanceof Context ) { throw new IllegalArgumentException ( "Don't using the " + Context . class . getName ( ) + " object as a parameters, it's implicitly delivery by thread-local. parameter context: " + ( ( Context ) current ) . thread . getName ( ) + ", current context: " + thread . getName ( ) ) ; } this . current = current ;
public class Format { /** * Formats a string into a larger string ( like sprintf in C ) * @ param s the value to format * @ return the formatted string */ public String form ( String s ) { } }
if ( fmt != 's' ) { throw new java . lang . IllegalArgumentException ( ) ; } if ( precision >= 0 && precision < s . length ( ) ) { s = s . substring ( 0 , precision ) ; } return pad ( s ) ;
public class RegistrationManagerImpl { /** * { @ inheritDoc } */ @ Override public User getUser ( String name ) throws ServiceException { } }
if ( ! isStarted ) { ServiceDirectoryError error = new ServiceDirectoryError ( ErrorCode . SERVICE_DIRECTORY_MANAGER_FACTORY_CLOSED ) ; throw new ServiceException ( error ) ; } if ( name == null || name . isEmpty ( ) ) { throw new IllegalArgumentException ( "The name can not be empty." ) ; } return getRegistrationService ( ) . getUser ( name ) ;
public class BenchmarkMatrixMultAccessors { /** * Wrapper functions with no bounds checking are used to access matrix internals */ public static long wrapped ( DMatrixRMaj a , DMatrixRMaj b , DMatrixRMaj c ) { } }
// Benchmarks c = a * b using the unchecked get/set/plus accessors and
// returns the elapsed wall-clock time in milliseconds. The index
// bookkeeping below mirrors the classic reordered (i,k,j) matrix multiply
// over row-major storage; it is left unchanged because the increments are
// order-sensitive.
long timeBefore = System.currentTimeMillis();
double valA;
int indexCbase = 0;
// One past the last linear index of b; used to detect the end of the k loop.
int endOfKLoop = b.numRows * b.numCols;
for (int i = 0; i < a.numRows; i++) {
    int indexA = i * a.numCols;
    // need to assign dataC to a value initially
    int indexB = 0;
    int indexC = indexCbase;
    int end = indexB + b.numCols;
    valA = a.get(indexA++);
    // First k iteration uses set() to initialize row i of c.
    while (indexB < end) {
        c.set(indexC++, valA * b.get(indexB++));
    }
    // now add to it
    while (indexB != endOfKLoop) { // k loop
        indexC = indexCbase;
        end = indexB + b.numCols;
        valA = a.get(indexA++);
        while (indexB < end) { // j loop
            c.plus(indexC++, valA * b.get(indexB++));
        }
    }
    indexCbase += c.numCols;
}
return System.currentTimeMillis() - timeBefore;
public class GetPendingJobExecutionsResult { /** * A list of JobExecutionSummary objects with status QUEUED . * @ param queuedJobs * A list of JobExecutionSummary objects with status QUEUED . */ public void setQueuedJobs ( java . util . Collection < JobExecutionSummary > queuedJobs ) { } }
if ( queuedJobs == null ) { this . queuedJobs = null ; return ; } this . queuedJobs = new java . util . ArrayList < JobExecutionSummary > ( queuedJobs ) ;
public class AmazonSQSClient { /** * Sets the value of one or more queue attributes . When you change a queue ' s attributes , the change can take up to * 60 seconds for most of the attributes to propagate throughout the Amazon SQS system . Changes made to the * < code > MessageRetentionPeriod < / code > attribute can take up to 15 minutes . * < note > * In the future , new attributes might be added . If you write code that calls this action , we recommend that you * structure your code so that it can handle new attributes gracefully . * Cross - account permissions don ' t apply to this action . For more information , see see < a href = * " http : / / docs . aws . amazon . com / AWSSimpleQueueService / latest / SQSDeveloperGuide / sqs - customer - managed - policy - examples . html # grant - cross - account - permissions - to - role - and - user - name " * > Grant Cross - Account Permissions to a Role and a User Name < / a > in the < i > Amazon Simple Queue Service Developer * Guide < / i > . * < / note > * @ param setQueueAttributesRequest * @ return Result of the SetQueueAttributes operation returned by the service . * @ throws InvalidAttributeNameException * The specified attribute doesn ' t exist . * @ sample AmazonSQS . SetQueueAttributes * @ see < a href = " http : / / docs . aws . amazon . com / goto / WebAPI / sqs - 2012-11-05 / SetQueueAttributes " target = " _ top " > AWS API * Documentation < / a > */ @ Override public SetQueueAttributesResult setQueueAttributes ( SetQueueAttributesRequest request ) { } }
request = beforeClientExecution ( request ) ; return executeSetQueueAttributes ( request ) ;
public class VirtualList { /** * Returns a portion of this list between the specified from , inclusive , and to , exclusive . * @ param from low endpoint ( inclusive ) of the subList * @ param to high endpoint ( exclusive ) of the subList * @ return the sub list of item */ List < T > subList ( long from , long to ) { } }
if ( fullListSize > 0 && to - from > 0 ) { checkRange ( from ) ; // subList takes a [ from ; to [ range checkRange ( to - 1 ) ; // check if the data of the required sub list are available assertSubRange ( from , to - 1 ) ; // cannot exceed Integer . MAX _ VALUE ( checked by checkSubRange ) return subList . subList ( ( int ) ( from - subListOffset ) , ( int ) ( to - subListOffset ) ) ; } return new ArrayList < > ( ) ;
public class ZipFile { /** * Returns an input stream for reading the contents of the specified * zip file entry . * < p > Closing this ZIP file will , in turn , close all input * streams that have been returned by invocations of this method . * @ param entry the zip file entry * @ return the input stream for reading the contents of the specified * zip file entry . * @ throws ZipException if a ZIP format error has occurred * @ throws IOException if an I / O error has occurred * @ throws IllegalStateException if the zip file has been closed */ public InputStream getInputStream ( ZipEntry entry ) throws IOException { } }
if (entry == null) {
    throw new NullPointerException("entry");
}
// jzentry is a native handle to the located entry; 0 means "not found".
long jzentry = 0;
ZipFileInputStream in = null;
synchronized (this) {
    ensureOpen();
    // Look the entry up by name. When the entry's general-purpose EFS flag
    // is set and the zip charset is not UTF-8, the name must be encoded as
    // UTF-8 bytes instead of the configured charset.
    if (!zc.isUTF8() && (entry.flag & EFS) != 0) {
        jzentry = getEntry(jzfile, zc.getBytesUTF8(entry.name), true);
    } else {
        jzentry = getEntry(jzfile, zc.getBytes(entry.name), true);
    }
    if (jzentry == 0) {
        return null;
    }
    in = new ZipFileInputStream(jzentry);
    switch (getEntryMethod(jzentry)) {
    case STORED:
        // Track the stream so closing this ZipFile also closes it.
        synchronized (streams) {
            streams.put(in, null);
        }
        return in;
    case DEFLATED:
        // MORE: Compute good size for inflater stream:
        long size = getEntrySize(jzentry) + 2; // Inflater likes a bit of slack
        if (size > 65536) size = 8192;
        if (size <= 0) size = 4096;
        Inflater inf = getInflater();
        InputStream is = new ZipFileInflaterInputStream(in, inf, (int) size);
        // Remember the inflater so it can be recycled on close.
        synchronized (streams) {
            streams.put(is, inf);
        }
        return is;
    default:
        throw new ZipException("invalid compression method");
    }
}
public class LimesurveyRC { /** * Gets groups from a survey . * The groups are ordered using the " group _ order " field . * @ param surveyId the survey id you want to get the groups * @ return a stream of groups in an ordered order * @ throws LimesurveyRCException the limesurvey rc exception */ public Stream < LsQuestionGroup > getGroups ( int surveyId ) throws LimesurveyRCException { } }
JsonElement result = callRC ( new LsApiBody ( "list_groups" , getParamsWithKey ( surveyId ) ) ) ; List < LsQuestionGroup > questionGroups = gson . fromJson ( result , new TypeToken < List < LsQuestionGroup > > ( ) { } . getType ( ) ) ; return questionGroups . stream ( ) . sorted ( Comparator . comparing ( LsQuestionGroup :: getOrder ) ) ;
public class CdnClient { /** * Get hit rate statistics with specified attributes . * @ param request The request containing all the options related to the statistics . * @ return Details of statistics */ public GetStatHitRateResponse getStatHitRate ( GetStatHitRateRequest request ) { } }
InternalRequest internalRequest = this . createRequest ( request , HttpMethodName . GET , STAT , "hitrate" ) ; if ( request . getStartTime ( ) != null ) { internalRequest . addParameter ( "startTime" , DateUtils . formatAlternateIso8601Date ( request . getStartTime ( ) ) ) ; } if ( request . getEndTime ( ) != null ) { internalRequest . addParameter ( "endTime" , DateUtils . formatAlternateIso8601Date ( request . getEndTime ( ) ) ) ; } if ( request . getDomain ( ) != null ) { internalRequest . addParameter ( "domain" , request . getDomain ( ) ) ; } if ( request . getPeriod ( ) != null ) { internalRequest . addParameter ( "period" , String . valueOf ( request . getPeriod ( ) ) ) ; } return this . invokeHttpClient ( internalRequest , GetStatHitRateResponse . class ) ;
public class Attachment { /** * Equivalent to { @ link com . tngtech . jgiven . attachment . Attachment # Attachment ( String , MediaType ) } * @ throws java . lang . IllegalArgumentException if mediaType is not binary */ public static Attachment fromBase64 ( String base64encodedContent , MediaType mediaType ) { } }
if ( ! mediaType . isBinary ( ) ) { throw new IllegalArgumentException ( "MediaType must be binary" ) ; } return new Attachment ( base64encodedContent , mediaType ) ;
public class MPXReader { /** * Creates and populates a new task relationship . * @ param field which task field source of data * @ param sourceTask relationship source task * @ param relationship relationship string * @ throws MPXJException */ private void populateRelation ( TaskField field , Task sourceTask , String relationship ) throws MPXJException { } }
// Parses a relationship string of the shape "<taskID>[SF|SS|FS|FF][+|-]<lag>"
// and attaches the resulting predecessor relation to sourceTask.
int index = 0;
int length = relationship.length();
// Extract the identifier
while ((index < length) && (Character.isDigit(relationship.charAt(index)) == true)) {
    ++index;
}
Integer taskID;
try {
    taskID = Integer.valueOf(relationship.substring(0, index));
} catch (NumberFormatException ex) {
    // No leading digits at all: the relationship string is malformed.
    throw new MPXJException(MPXJException.INVALID_FORMAT + " '" + relationship + "'");
}
// Now find the task, so we can extract the unique ID
Task targetTask;
if (field == TaskField.PREDECESSORS) {
    targetTask = m_projectFile.getTaskByID(taskID);
} else {
    targetTask = m_projectFile.getTaskByUniqueID(taskID);
}
// If we haven't reached the end, we next expect to find
// SF, SS, FS, FF
RelationType type = null;
Duration lag = null;
if (index == length) {
    // Bare task id: default relation type and zero lag.
    type = RelationType.FINISH_START;
    lag = Duration.getInstance(0, TimeUnit.DAYS);
} else {
    // The type code is exactly two characters; a single trailing char is invalid.
    if ((index + 1) == length) {
        throw new MPXJException(MPXJException.INVALID_FORMAT + " '" + relationship + "'");
    }
    type = RelationTypeUtility.getInstance(m_locale, relationship.substring(index, index + 2));
    index += 2;
    if (index == length) {
        lag = Duration.getInstance(0, TimeUnit.DAYS);
    } else {
        // Skip an optional '+' sign, then parse the lag duration.
        if (relationship.charAt(index) == '+') {
            ++index;
        }
        lag = DurationUtility.getInstance(relationship.substring(index),
                m_formats.getDurationDecimalFormat(), m_locale);
    }
}
if (type == null) {
    throw new MPXJException(MPXJException.INVALID_FORMAT + " '" + relationship + "'");
}
// We have seen at least one example MPX file where an invalid task ID
// is present. We'll ignore this as the schedule is otherwise valid.
if (targetTask != null) {
    Relation relation = sourceTask.addPredecessor(targetTask, type, lag);
    m_eventManager.fireRelationReadEvent(relation);
}
public class SortByPositionOperator {
    /**
     * Recursively sorts the children of the given area (and of every descendant)
     * in place by their position.
     *
     * @param root the subtree root whose descendants are sorted
     * @param columnFirst if false, sort in row order (Y then X); if true, in column order (X then Y)
     */
    protected void recursivelySortChildAreas ( Area root , final boolean columnFirst ) {
        if ( root . getChildCount ( ) > 1 ) {
            Vector < Area > list = new Vector < Area > ( root . getChildren ( ) ) ;
            Collections . sort ( list , new Comparator < Area > ( ) {
                public int compare ( Area a1 , Area a2 ) {
                    // Primary axis depends on columnFirst; ties fall through to the other axis.
                    if ( ! columnFirst )
                        return a1 . getY1 ( ) == a2 . getY1 ( ) ? a1 . getX1 ( ) - a2 . getX1 ( ) : a1 . getY1 ( ) - a2 . getY1 ( ) ;
                    else
                        return a1 . getX1 ( ) == a2 . getX1 ( ) ? a1 . getY1 ( ) - a2 . getY1 ( ) : a1 . getX1 ( ) - a2 . getX1 ( ) ;
                }
            } ) ;
            // Re-attach the children in their new order
            root . removeAllChildren ( ) ;
            root . appendChildren ( list ) ;
        }
        // Recurse regardless of whether this level needed sorting
        for ( int i = 0 ; i < root . getChildCount ( ) ; i ++ )
            recursivelySortChildAreas ( root . getChildAt ( i ) , columnFirst ) ;
    }
}
public class FormatOptions { /** * Helper to check for legal combinations of flags . */ static boolean checkFlagConsistency ( int flags , boolean hasWidth ) { } }
// Check that we specify at most one of ' prefix plus ' and ' prefix space ' . if ( ( flags & ( FLAG_PREFIX_PLUS_FOR_POSITIVE_VALUES | FLAG_PREFIX_SPACE_FOR_POSITIVE_VALUES ) ) == ( FLAG_PREFIX_PLUS_FOR_POSITIVE_VALUES | FLAG_PREFIX_SPACE_FOR_POSITIVE_VALUES ) ) { return false ; } // Check that we specify at most one of ' left align ' and ' leading zeros ' . if ( ( flags & ( FLAG_LEFT_ALIGN | FLAG_SHOW_LEADING_ZEROS ) ) == ( FLAG_LEFT_ALIGN | FLAG_SHOW_LEADING_ZEROS ) ) { return false ; } // Check that if ' left align ' or ' leading zeros ' is specified , we also have a width value . if ( ( flags & ( FLAG_LEFT_ALIGN | FLAG_SHOW_LEADING_ZEROS ) ) != 0 && ! hasWidth ) { return false ; } return true ;
public class BaseMojo {
    /**
     * Add any relevant project dependencies to the classpath. Indirectly takes
     * includePluginDependencies and ExecutableDependency into consideration.
     *
     * @param artifacts the set the matching artifacts (and their transitive
     *        dependencies) are added to
     * @throws MojoExecutionException if resolving transitive dependencies fails
     */
    protected void addExtraPluginDependencies ( Set < Artifact > artifacts ) throws MojoExecutionException {
        // Nothing configured - nothing to add.
        if ( extraPluginDependencyArtifactId == null && extendedPluginDependencyArtifactId == null ) {
            return ;
        }
        // Copy to avoid concurrent modification of the plugin dependency set.
        Set < Artifact > deps = new HashSet < Artifact > ( this . pluginDependencies ) ;
        for ( Artifact artifact : deps ) {
            // Only artifacts matching one of the two configured ids are added.
            if ( artifact . getArtifactId ( ) . equals ( extraPluginDependencyArtifactId ) || artifact . getArtifactId ( ) . equals ( extendedPluginDependencyArtifactId ) ) {
                getLog ( ) . debug ( "Adding extra plugin dependency artifact: " + artifact . getArtifactId ( ) + " to classpath" ) ;
                artifacts . add ( artifact ) ;
                // add the transitive dependencies of this artifact as well
                Set < Artifact > resolvedDeps = resolveExecutableDependencies ( artifact ) ;
                for ( Artifact dep : resolvedDeps ) {
                    getLog ( ) . debug ( "Adding extra plugin dependency artifact: " + dep . getArtifactId ( ) + " to classpath" ) ;
                    artifacts . add ( dep ) ;
                }
            }
        }
    }
}
public class EtcdClient { /** * Get the version of the Etcd server * @ return version as String * @ deprecated use version ( ) when using etcd 2.1 + . */ @ Deprecated public String getVersion ( ) { } }
try { return new EtcdOldVersionRequest ( this . client , retryHandler ) . send ( ) . get ( ) ; } catch ( IOException | EtcdException | EtcdAuthenticationException | TimeoutException e ) { return null ; }
public class GlobalQuartzScheduler { /** * Schedule a new job that should be executed now and only once . * @ param sJobName * Name of the job - must be unique within the whole system ! * @ param aJobClass * The Job class to be executed . * @ param aJobData * Optional job data map . * @ return The created trigger key for further usage . Never < code > null < / code > . */ @ Nonnull public TriggerKey scheduleJobNowOnce ( @ Nonnull final String sJobName , @ Nonnull final Class < ? extends IJob > aJobClass , @ Nullable final Map < String , ? extends Object > aJobData ) { } }
return scheduleJob ( sJobName , JDK8TriggerBuilder . newTrigger ( ) . startNow ( ) . withSchedule ( SimpleScheduleBuilder . simpleSchedule ( ) . withIntervalInMinutes ( 1 ) . withRepeatCount ( 0 ) ) , aJobClass , aJobData ) ;
public class SpannableStringBuilder { /** * Documentation from interface */ public SpannableStringBuilder insert ( int where , CharSequence tb ) { } }
return replace ( where , where , tb , 0 , tb . length ( ) ) ;
public class Node { /** * Get the outer HTML of this node . For example , on a { @ code p } element , may return { @ code < p > Para < / p > } . * @ return outer HTML * @ see Element # html ( ) * @ see Element # text ( ) */ public String outerHtml ( ) { } }
StringBuilder accum = StringUtil . borrowBuilder ( ) ; outerHtml ( accum ) ; return StringUtil . releaseBuilder ( accum ) ;
public class VTimeZone {
    /**
     * Writes RFC2445 VTIMEZONE data applicable for dates after
     * the specified start time.
     *
     * @param writer The <code>Writer</code> used for the output
     * @param start The start time
     * @throws IOException If there were problems reading and writing to the writer.
     */
    public void write ( Writer writer , long start ) throws IOException {
        // Extract only the rules applicable to dates after the start time
        TimeZoneRule [ ] rules = tz . getTimeZoneRules ( start ) ;
        // Rebuild a RuleBasedTimeZone from that subset: rules[0] is the initial
        // rule, the rest are transition rules.
        RuleBasedTimeZone rbtz = new RuleBasedTimeZone ( tz . getID ( ) , ( InitialTimeZoneRule ) rules [ 0 ] ) ;
        for ( int i = 1 ; i < rules . length ; i ++ ) {
            rbtz . addTransitionRule ( rules [ i ] ) ;
        }
        // Embed the Olson zone id / tz data version as a custom property when known,
        // marking the output as a partial ruleset starting at 'start'.
        String [ ] customProperties = null ;
        if ( olsonzid != null && ICU_TZVERSION != null ) {
            customProperties = new String [ 1 ] ;
            customProperties [ 0 ] = ICU_TZINFO_PROP + COLON + olsonzid + "[" + ICU_TZVERSION + "/Partial@" + start + "]" ;
        }
        writeZone ( writer , rbtz , customProperties ) ;
    }
}
public class CmsImportVersion7 { /** * Sets the membership information that could not been set immediately , * because of import order issues . < p > */ public void setMembership ( ) { } }
if ( ( m_orgUnit == null ) || ( m_membership == null ) ) { return ; } // get the membership data to set Map < String , Map < String , String > > membership = m_membership . get ( m_orgUnit . getName ( ) ) ; if ( membership == null ) { return ; } // set group membership Map < String , String > groups = membership . get ( I_CmsPrincipal . PRINCIPAL_GROUP ) ; if ( groups != null ) { Iterator < Entry < String , String > > it = groups . entrySet ( ) . iterator ( ) ; while ( it . hasNext ( ) ) { Entry < String , String > entry = it . next ( ) ; String userName = entry . getKey ( ) ; String groupName = entry . getValue ( ) ; // set the users group try { getCms ( ) . addUserToGroup ( userName , groupName ) ; } catch ( Throwable e ) { getReport ( ) . println ( Messages . get ( ) . container ( Messages . RPT_USER_COULDNT_BE_ADDED_TO_GROUP_2 , userName , groupName ) , I_CmsReport . FORMAT_WARNING ) ; if ( LOG . isDebugEnabled ( ) ) { LOG . debug ( e . getLocalizedMessage ( ) , e ) ; } } } } // set role membership Map < String , String > roles = membership . get ( I_CmsPrincipal . PRINCIPAL_USER ) ; if ( roles != null ) { Iterator < Entry < String , String > > it = roles . entrySet ( ) . iterator ( ) ; while ( it . hasNext ( ) ) { Entry < String , String > entry = it . next ( ) ; String userName = entry . getKey ( ) ; String roleName = entry . getValue ( ) ; // set the users roles CmsRole role = CmsRole . valueOfRoleName ( roleName ) ; try { // set the user role OpenCms . getRoleManager ( ) . addUserToRole ( getCms ( ) , role , userName ) ; return ; } catch ( Throwable e ) { getReport ( ) . println ( Messages . get ( ) . container ( Messages . RPT_USER_COULDNT_BE_ADDED_TO_ROLE_2 , userName , roleName ) , I_CmsReport . FORMAT_WARNING ) ; if ( LOG . isDebugEnabled ( ) ) { LOG . debug ( e . getLocalizedMessage ( ) , e ) ; } } } }
public class BplusTree {
    /**
     * Returns the least key strictly greater than the given key, or null if
     * there is no such key.
     *
     * @param key the key
     * @return the Entry with least key strictly greater than the given key,
     *         or null if there is no such key.
     */
    public synchronized TreeEntry < K , V > higherEntry ( final K key ) {
        // Nearest entry above the given key: greater = true, inclusive = false.
        return getRoundEntry ( key , true , false ) ;
    }
}
public class UicStats { /** * Determines the serialized size of an object by serializng it to a byte array . * @ param obj the object to find the serialized size of . * @ return the serialized size of the given object , or - 1 on error . */ private int getSerializationSize ( final Object obj ) { } }
try { ByteArrayOutputStream bos = new ByteArrayOutputStream ( ) ; ObjectOutputStream oos = new ObjectOutputStream ( bos ) ; oos . writeObject ( obj ) ; oos . close ( ) ; byte [ ] bytes = bos . toByteArray ( ) ; return bytes . length ; } catch ( IOException ex ) { // Unable to serialize so cannot determine size . return - 1 ; }
public class ValidatingStreamReader {
    /**
     * Method called to resolve the path to an external DTD subset, given its
     * system identifier.
     *
     * @param systemId the system identifier from the DOCTYPE declaration
     * @return the resolved URI of the external subset
     * @throws IOException if the URI cannot be constructed
     */
    private URI resolveExtSubsetPath ( String systemId ) throws IOException {
        // Do we have a context (the current input's URL) to resolve against?
        URL ctxt = ( mInput == null ) ? null : mInput . getSource ( ) ;
        /* Ok, either got a context or not; let's create the URL based on
         * the id, and optional context:
         */
        if ( ctxt == null ) {
            /* Call will try to figure out if the system id has the protocol
             * in it; if not, create a relative file, if it does, try to
             * resolve it.
             */
            return URLUtil . uriFromSystemId ( systemId ) ;
        }
        URL url = URLUtil . urlFromSystemId ( systemId , ctxt ) ;
        try {
            return new URI ( url . toExternalForm ( ) ) ;
        } catch ( URISyntaxException e ) {
            // should never occur, since the URL was just successfully constructed...
            throw new IOException ( "Failed to construct URI for external subset, URL = " + url . toExternalForm ( ) + ": " + e . getMessage ( ) ) ;
        }
    }
}
public class VueTemplateCompiler {
    /**
     * Init the Nashorn engine and load the Vue compiler in it.
     */
    private void initEngine ( ) {
        engine = ( NashornScriptEngine ) new ScriptEngineManager ( ) . getEngineByName ( "nashorn" ) ;
        try {
            // Nashorn exposes no "global" binding; create one so the bundled
            // Vue compiler script can locate its global object.
            engine . eval ( "(function(global){global.global = global})(this);" ) ;
            engine . eval ( NashornVueTemplateCompiler . NASHORN_VUE_TEMPLATE_COMPILER ) ;
        } catch ( ScriptException e ) {
            // NOTE(review): the failure is only printed, leaving the engine
            // half-initialized - consider propagating or logging instead.
            e . printStackTrace ( ) ;
        }
    }
}
public class AbstractFileServlet { /** * given throwable . */ private static String __getStackTrace ( Throwable throwable ) { } }
StringWriter stringWriter = new StringWriter ( ) ; throwable . printStackTrace ( new PrintWriter ( stringWriter ) ) ; return stringWriter . toString ( ) ;
public class PollForThirdPartyJobsResult { /** * Information about the jobs to take action on . * < b > NOTE : < / b > This method appends the values to the existing list ( if any ) . Use * { @ link # setJobs ( java . util . Collection ) } or { @ link # withJobs ( java . util . Collection ) } if you want to override the * existing values . * @ param jobs * Information about the jobs to take action on . * @ return Returns a reference to this object so that method calls can be chained together . */ public PollForThirdPartyJobsResult withJobs ( ThirdPartyJob ... jobs ) { } }
if ( this . jobs == null ) { setJobs ( new java . util . ArrayList < ThirdPartyJob > ( jobs . length ) ) ; } for ( ThirdPartyJob ele : jobs ) { this . jobs . add ( ele ) ; } return this ;
public class ThreadContext { /** * Set the properties for this thread context . * @ param sslProps */ public void setProperties ( Properties sslProps ) { } }
if ( TraceComponent . isAnyTracingEnabled ( ) && tc . isDebugEnabled ( ) ) Tr . debug ( tc , "setProperties" ) ; this . sslProperties = sslProps ;
public class BusinessdayCalendar {
    /**
     * Creates a date from a base date and a whitespace-separated sequence of
     * offset codes (e.g. "1D", "2W", "3M", "1Y", "5B" for business days, or a
     * bare number interpreted as an ACT/365 year fraction). Codes are applied
     * left to right.
     *
     * @param baseDate the date the offsets are applied to
     * @param dateOffsetCode the offset code string
     * @return the resulting date
     * @see net.finmath.time.businessdaycalendar.BusinessdayCalendarInterface#createDateFromDateAndOffsetCode(LocalDate, String)
     */
    public LocalDate createDateFromDateAndOffsetCode ( LocalDate baseDate , String dateOffsetCode ) {
        dateOffsetCode = dateOffsetCode . trim ( ) ;
        StringTokenizer tokenizer = new StringTokenizer ( dateOffsetCode ) ;
        LocalDate maturityDate = baseDate ;
        while ( tokenizer . hasMoreTokens ( ) ) {
            String maturityCodeSingle = tokenizer . nextToken ( ) ;
            // Split between the numeric part and the trailing unit letters, e.g. "3M" -> ["3", "M"]
            String [ ] maturityCodeSingleParts = maturityCodeSingle . split ( "(?<=[0-9|\\.])(?=[A-Z|a-z])" ) ;
            /*
             * If no unit is given, the number is interpreted as ACT/365.
             * Otherwise we switch according to dateOffsetUnit.
             */
            if ( maturityCodeSingleParts . length == 1 ) {
                // Try to parse a double as ACT/365 (year fraction -> rounded day count)
                double maturityValue = Double . valueOf ( maturityCodeSingle ) ;
                maturityDate = maturityDate . plusDays ( ( int ) Math . round ( maturityValue * 365 ) ) ;
            } else if ( maturityCodeSingleParts . length == 2 ) {
                int maturityValue = Integer . valueOf ( maturityCodeSingleParts [ 0 ] ) ;
                DateOffsetUnit dateOffsetUnit = DateOffsetUnit . getEnum ( maturityCodeSingleParts [ 1 ] ) ;
                switch ( dateOffsetUnit ) {
                    case DAYS : {
                        maturityDate = maturityDate . plusDays ( maturityValue ) ;
                        break ;
                    }
                    case BUSINESS_DAYS : {
                        // Business days roll over this calendar's non-business days
                        maturityDate = getRolledDate ( maturityDate , maturityValue ) ;
                        break ;
                    }
                    case WEEKS : {
                        maturityDate = maturityDate . plusWeeks ( maturityValue ) ;
                        break ;
                    }
                    case MONTHS : {
                        maturityDate = maturityDate . plusMonths ( maturityValue ) ;
                        break ;
                    }
                    case YEARS : {
                        maturityDate = maturityDate . plusYears ( maturityValue ) ;
                        break ;
                    }
                    default :
                        throw new IllegalArgumentException ( "Cannot handle dateOffsetCode '" + dateOffsetCode + "'." ) ;
                }
            } else {
                throw new IllegalArgumentException ( "Cannot handle dateOffsetCode '" + dateOffsetCode + "'." ) ;
            }
        }
        return maturityDate ;
    }
}
public class BrowseIterator { /** * Sets the collection up using the specified parameters . * @ param items the items in the iterator * @ param ps the page size for the structure * @ param sort the sorter , if any , to use for sorting */ private void setup ( Collection < T > items , int ps , Comparator < T > sort ) { } }
Collection < T > list ; Iterator < T > it ; if ( sort == null ) { list = items ; } else { list = new TreeSet < T > ( sort ) ; list . addAll ( items ) ; } sorter = sort ; pageSize = ps ; pages = new ArrayList < Collection < T > > ( ) ; it = list . iterator ( ) ; while ( it . hasNext ( ) ) { ArrayList < T > page = new ArrayList < T > ( ) ; pages . add ( page ) ; for ( int i = 0 ; i < ps ; i ++ ) { T ob ; if ( ! it . hasNext ( ) ) { break ; } ob = it . next ( ) ; page . add ( ob ) ; } }
public class ConfluentRegistryAvroDeserializationSchema { /** * Creates { @ link AvroDeserializationSchema } that produces classes that were generated from avro * schema and looks up writer schema in Confluent Schema Registry . * @ param tClass class of record to be produced * @ param url url of schema registry to connect * @ param identityMapCapacity maximum number of cached schema versions ( default : 1000) * @ return deserialized record */ public static < T extends SpecificRecord > ConfluentRegistryAvroDeserializationSchema < T > forSpecific ( Class < T > tClass , String url , int identityMapCapacity ) { } }
return new ConfluentRegistryAvroDeserializationSchema < > ( tClass , null , new CachedSchemaCoderProvider ( url , identityMapCapacity ) ) ;
public class AVObject {
    /**
     * Builds the JSON parameter payload describing this object's pending
     * changes, for a save/update with the server. In overwrite mode the whole
     * server data (minus immutable fields) is sent; otherwise the recorded
     * field operations are encoded, optionally wrapped into a batch request.
     *
     * @return the request payload as a JSONObject
     */
    protected JSONObject generateChangedParam ( ) {
        if ( totallyOverwrite ) {
            HashMap < String , Object > tmp = new HashMap < > ( ) ;
            tmp . putAll ( this . serverData ) ;
            // createdAt, updatedAt, objectId are immutable and must not be sent.
            tmp . remove ( KEY_CREATED_AT ) ;
            tmp . remove ( KEY_UPDATED_AT ) ;
            tmp . remove ( KEY_OBJECT_ID ) ;
            return new JSONObject ( tmp ) ;
        }
        // Encode each pending field operation, e.g.
        // { "attr" : { "__op" : "Add" , "objects" : [ obj1, obj2 ] } }
        Map < String , Object > params = new HashMap < String , Object > ( ) ;
        Set < Map . Entry < String , ObjectFieldOperation > > entries = operations . entrySet ( ) ;
        for ( Map . Entry < String , ObjectFieldOperation > entry : entries ) {
            Map < String , Object > oneOp = entry . getValue ( ) . encode ( ) ;
            params . putAll ( oneOp ) ;
        }
        if ( null != this . acl ) {
            AVACL serverACL = generateACLFromServerData ( ) ;
            if ( ! this . acl . equals ( serverACL ) ) {
                // only append an ACL update when the ACL actually changed
                ObjectFieldOperation op = OperationBuilder . gBuilder . create ( OperationBuilder . OperationType . Set , KEY_ACL , acl ) ;
                params . putAll ( op . encode ( ) ) ;
            }
        }
        // Simple case: a single request carries all changes.
        if ( ! needBatchMode ( ) ) {
            return new JSONObject ( params ) ;
        }
        // Batch mode: the main request plus any compound-operation sub-requests,
        // all wrapped under a "requests" array.
        List < Map < String , Object > > finalParams = new ArrayList < Map < String , Object > > ( ) ;
        Map < String , Object > topParams = Utils . makeCompletedRequest ( getObjectId ( ) , getRequestRawEndpoint ( ) , getRequestMethod ( ) , params ) ;
        if ( null != topParams ) {
            finalParams . add ( topParams ) ;
        }
        for ( ObjectFieldOperation ops : this . operations . values ( ) ) {
            if ( ops instanceof CompoundOperation ) {
                List < Map < String , Object > > restParams = ( ( CompoundOperation ) ops ) . encodeRestOp ( this ) ;
                if ( null != restParams && ! restParams . isEmpty ( ) ) {
                    finalParams . addAll ( restParams ) ;
                }
            }
        }
        Map < String , Object > finalResult = new HashMap < String , Object > ( 1 ) ;
        finalResult . put ( "requests" , finalParams ) ;
        return new JSONObject ( finalResult ) ;
    }
}
public class SharedMappedFilesCache {
    /**
     * For tests only!!!
     * Detaches the current cache instance and closes every buffer it held.
     */
    public static void invalidate ( ) {
        final SharedMappedFilesCache oldCache ;
        // Swap the shared reference under the lock ...
        synchronized ( syncObject ) {
            oldCache = SharedMappedFilesCache . theCache ;
            SharedMappedFilesCache . theCache = null ;
        }
        // ... but close the buffers outside it, keeping the critical section short.
        if ( oldCache != null ) {
            for ( final SharedMappedByteBuffer buffer : oldCache . cache . values ( ) ) {
                buffer . close ( ) ;
            }
        }
    }
}
public class LabelledEvents {
    /**
     * Marks the end of a query identified by the provided correlationId.
     *
     * @param query - Query data
     * @param correlationId - Identifier
     * @param label - allows queries to be grouped by type
     * @return RemoveLabelledQuery event to pass to the Events systems EventBus
     */
    public static < T > RemoveLabelledQuery < T > finish ( T query , long correlationId , String label ) {
        // Wrap the query, its correlation id and its label into the removal event's request data.
        return new RemoveLabelledQuery < > ( RequestData . builder ( ) . query ( query ) . correlationId ( correlationId ) . type ( label ) . build ( ) ) ;
    }
}
public class TaxinvoiceServiceImp {
    /**
     * Updates a tax invoice. Convenience overload that delegates to the full
     * variant with a null user id.
     *
     * @see com.popbill.api.TaxinvoiceService#update(java.lang.String, com.popbill.api.taxinvoice.MgtKeyType, java.lang.String, com.popbill.api.taxinvoice.Taxinvoice)
     */
    @ Override
    public Response update ( String CorpNum , MgtKeyType KeyType , String MgtKey , Taxinvoice taxinvoice ) throws PopbillException {
        return update ( CorpNum , KeyType , MgtKey , taxinvoice , null ) ;
    }
}
public class Address { /** * { @ inheritDoc } */ public int compareTo ( Address a ) { } }
int compare = workManagerId . compareTo ( a . getWorkManagerId ( ) ) ; if ( compare != 0 ) return compare ; compare = workManagerName . compareTo ( a . getWorkManagerName ( ) ) ; if ( compare != 0 ) return compare ; if ( transportId != null ) { if ( a . getTransportId ( ) != null ) { return transportId . compareTo ( a . getTransportId ( ) ) ; } else { return 1 ; } } else { if ( a . getTransportId ( ) != null ) { return - 1 ; } } return 0 ;
public class ReportsRegistry {
    /**
     * Initializes the set of report implementations by scanning the reports
     * package for subtypes of {@code Report} and instantiating each one.
     * Clears any previously registered reports first.
     */
    public static void init ( ) {
        reports . clear ( ) ;
        // Discover all Report subclasses on the classpath under REPORTS_PACKAGE.
        Reflections reflections = new Reflections ( REPORTS_PACKAGE ) ;
        final Set < Class < ? extends Report > > reportClasses = reflections . getSubTypesOf ( Report . class ) ;
        for ( Class < ? extends Report > c : reportClasses ) {
            LOG . info ( "Report class: " + c . getName ( ) ) ;
            try {
                // NOTE(review): requires a public no-arg constructor on every report class.
                reports . add ( c . newInstance ( ) ) ;
            } catch ( IllegalAccessException | InstantiationException e ) {
                // A single failing report must not prevent the others from loading.
                LOG . error ( "Error while loading report implementation classes" , e ) ;
            }
        }
        if ( LOG . isInfoEnabled ( ) ) {
            LOG . info ( String . format ( "Detected %s reports" , reports . size ( ) ) ) ;
        }
    }
}
public class BPMImporter {
    /**
     * Evaluates the program name by parsing the BPM XML at the source URL and
     * returning the "id" attribute of the first recognized process element,
     * or an empty string if none is found.
     * {@inheritDoc}
     *
     * @return the id of the first matching process element, or ""
     * @throws InstallationException if the document cannot be parsed or read
     */
    @ Override
    protected String evalProgramName ( ) throws InstallationException {
        String ret = "" ;
        try {
            final DocumentBuilderFactory dbFactory = DocumentBuilderFactory . newInstance ( ) ;
            final DocumentBuilder dBuilder = dbFactory . newDocumentBuilder ( ) ;
            final Document doc = dBuilder . parse ( getUrl ( ) . openStream ( ) , AbstractSourceImporter . ENCODING ) ;
            doc . getDocumentElement ( ) . normalize ( ) ;
            // Try each known process tag name; stop at the first tag that has any matches.
            for ( final String tagName : BPMImporter . PROCESSTAGNAMES ) {
                final NodeList processNodeList = doc . getElementsByTagName ( tagName ) ;
                if ( processNodeList != null && processNodeList . getLength ( ) > 0 ) {
                    // Only the first matching element is consulted.
                    final Node processNode = processNodeList . item ( 0 ) ;
                    if ( processNode . getNodeType ( ) == Node . ELEMENT_NODE ) {
                        final Element eElement = ( Element ) processNode ;
                        ret = eElement . getAttribute ( "id" ) ;
                    }
                    break ;
                }
            }
        } catch ( final ParserConfigurationException e ) {
            throw new InstallationException ( "could not Parse the given URL" , e ) ;
        } catch ( final SAXException e ) {
            throw new InstallationException ( "could not Parse the given URL" , e ) ;
        } catch ( final IOException e ) {
            throw new InstallationException ( "could not Read the given URL" , e ) ;
        }
        return ret ;
    }
}
public class OracleHelper { /** * Determine if the top level exception is an authorization exception . * Chained exceptions are not checked . * Look for the JDBC 4.0 exception subclass * or an Oracle error code in ( 1004 , 1005 , 1017) * @ param x the exception to check . * @ return true or false to indicate if the exception is an authorization error . * @ throws NullPointerException if a NULL exception parameter is supplied . */ boolean isAuthException ( SQLException x ) { } }
return x instanceof SQLInvalidAuthorizationSpecException || 1004 == x . getErrorCode ( ) // default username feature not supported ; logon denied || 1005 == x . getErrorCode ( ) // null password given ; logon denied || 1017 == x . getErrorCode ( ) ; // invalid username / password ; logon denied
public class AbstractMarshaller {
    /**
     * Unmarshals the given provided {@code javax.xml.transform.Source} into an object graph.
     * <p>This implementation inspects the given result, and calls {@code unmarshalDomSource},
     * {@code unmarshalSaxSource}, or {@code unmarshalStreamSource}.
     *
     * @param source the source to marshal from
     * @return the object graph
     * @throws IOException if an I/O Exception occurs
     * @throws XmlMappingException if the given source cannot be mapped to an object
     * @throws IllegalArgumentException if {@code source} is neither a {@code DOMSource},
     * a {@code SAXSource}, nor a {@code StreamSource}
     * @see #unmarshalDomSource(javax.xml.transform.dom.DOMSource)
     * @see #unmarshalSaxSource(javax.xml.transform.sax.SAXSource)
     * @see #unmarshalStreamSource(javax.xml.transform.stream.StreamSource)
     */
    @ Override
    public final Object unmarshal ( Source source ) throws IOException , XmlMappingException {
        // The StAX check must precede the SAXSource check: StAX sources may be
        // wrapped as SAXSource/StreamSource subclasses and would be mis-dispatched.
        if ( source instanceof DOMSource ) {
            return unmarshalDomSource ( ( DOMSource ) source ) ;
        } else if ( StaxUtils . isStaxSource ( source ) ) {
            return unmarshalStaxSource ( source ) ;
        } else if ( source instanceof SAXSource ) {
            return unmarshalSaxSource ( ( SAXSource ) source ) ;
        } else if ( source instanceof StreamSource ) {
            return unmarshalStreamSource ( ( StreamSource ) source ) ;
        } else {
            throw new IllegalArgumentException ( "Unknown Source type: " + source . getClass ( ) ) ;
        }
    }
}
public class MwRevisionDumpFileProcessor {
    /**
     * Tries to processes current XML starting from a &lt;page&gt; start tag up
     * to the corresponding end tag using {@link #processXmlPage()}. If this
     * fails for some reason, it tries to recover to read all remaining page
     * blocks nonetheless.
     *
     * @throws XMLStreamException
     *         if there was a problem reading the XML
     */
    void tryProcessXmlPage ( ) throws XMLStreamException {
        try {
            processXmlPage ( ) ;
        } catch ( MwDumpFormatException e ) {
            MwRevisionDumpFileProcessor . logger . error ( "Error when trying to process revision block for page \"" + this . mwRevision . getPrefixedTitle ( ) + "\" (namespace " + this . mwRevision . getNamespace ( ) + ", id " + this . mwRevision . getPageId ( ) + "): " + e . toString ( ) ) ;
            MwRevisionDumpFileProcessor . logger . info ( "Trying to recover ..." ) ;
            // Skip ahead to the end tag of the current <page> element so the
            // reader is positioned to continue with the next page block.
            while ( this . xmlReader . hasNext ( ) ) {
                this . xmlReader . next ( ) ;
                if ( this . xmlReader . getEventType ( ) == XMLStreamConstants . END_ELEMENT && this . xmlReader . getLocalName ( ) . equals ( MwRevisionDumpFileProcessor . E_PAGE ) ) {
                    MwRevisionDumpFileProcessor . logger . info ( "... recovery successful. Continuing processing." ) ;
                    return ;
                }
            }
            // End of stream reached without finding the closing </page> tag.
            MwRevisionDumpFileProcessor . logger . error ( "Recovery failed. Could not process remaining XML." ) ;
        }
    }
}
public class DeleteScalingPolicyRequestMarshaller {
    /**
     * Marshall the given parameter object.
     *
     * @param deleteScalingPolicyRequest the request to marshall; must not be null
     * @param protocolMarshaller the marshaller receiving the bound fields
     * @throws SdkClientException if the request is null or marshalling fails
     */
    public void marshall ( DeleteScalingPolicyRequest deleteScalingPolicyRequest , ProtocolMarshaller protocolMarshaller ) {
        if ( deleteScalingPolicyRequest == null ) {
            throw new SdkClientException ( "Invalid argument passed to marshall(...)" ) ;
        }
        try {
            protocolMarshaller . marshall ( deleteScalingPolicyRequest . getName ( ) , NAME_BINDING ) ;
            protocolMarshaller . marshall ( deleteScalingPolicyRequest . getFleetId ( ) , FLEETID_BINDING ) ;
        } catch ( Exception e ) {
            // Wrap any marshalling failure in the SDK's client exception type.
            throw new SdkClientException ( "Unable to marshall request to JSON: " + e . getMessage ( ) , e ) ;
        }
    }
}
public class MapUtils { /** * Returns the length of the longest key in a map , or 0 if the map is empty . Useful for printing * tables , etc . The map may not have any null keys . */ public static < V > int longestKeyLength ( Map < String , V > map ) { } }
if ( map . isEmpty ( ) ) { return 0 ; } return Ordering . natural ( ) . max ( FluentIterable . from ( map . keySet ( ) ) . transform ( StringUtils . lengthFunction ( ) ) ) ;
public class ConnectionToAppleServer {
    /**
     * Generic SSLSocketFactory builder: initializes a key manager from this
     * connection's keystore and combines it with the given trust managers.
     *
     * @param trustManagers the trust managers to install in the SSL context
     * @return SSLSocketFactory
     * @throws KeystoreException if the keystore cannot be loaded or the context initialized
     */
    protected SSLSocketFactory createSSLSocketFactoryWithTrustManagers ( TrustManager [ ] trustManagers ) throws KeystoreException {
        logger . debug ( "Creating SSLSocketFactory" ) ;
        // Get a KeyManager and initialize it with the server's keystore
        try {
            KeyStore keystore = getKeystore ( ) ;
            KeyManagerFactory kmf = KeyManagerFactory . getInstance ( ALGORITHM ) ;
            try {
                char [ ] password = KeystoreManager . getKeystorePasswordForSSL ( server ) ;
                kmf . init ( keystore , password ) ;
            } catch ( Exception e ) {
                // Normalize keystore-related failures before rethrowing
                e = KeystoreManager . wrapKeystoreException ( e ) ;
                throw e ;
            }
            // Build the SSLContext from our key managers and the supplied trust managers
            SSLContext sslc = SSLContext . getInstance ( PROTOCOL ) ;
            sslc . init ( kmf . getKeyManagers ( ) , trustManagers , null ) ;
            return sslc . getSocketFactory ( ) ;
        } catch ( Exception e ) {
            throw new KeystoreException ( "Keystore exception: " + e . getMessage ( ) , e ) ;
        }
    }
}