signature
stringlengths
43
39.1k
implementation
stringlengths
0
450k
public class JobFile { /** * ( non - Javadoc ) * @ see org . apache . hadoop . io . Writable # readFields ( java . io . DataInput ) */ @ Override public void readFields ( DataInput in ) throws IOException { } }
this . filename = Text . readString ( in ) ; this . jobid = Text . readString ( in ) ; this . isJobConfFile = in . readBoolean ( ) ; this . isJobHistoryFile = in . readBoolean ( ) ;
public class UnsyncStack { /** * Removes the object at the top of this stack and returns that object as the * value of this function . * @ return The object at the top of this stack ( the last item of the * < tt > ArrayList < / tt > object ) . * @ exception EmptyStackException if this stack is empty . */ public E pop ( ) { } }
E obj ; int len = size ( ) ; obj = peek ( ) ; remove ( len - 1 ) ; return obj ;
public class RetryingCacheInvoker { /** * { @ inheritDoc } * @ see org . jboss . as . clustering . infinispan . invoker . CacheInvoker # invoke ( org . infinispan . Cache , org . jboss . as . clustering . infinispan . invoker . CacheInvoker . Operation ) */ @ Override public < K , V , R > R invoke ( Cache < K , V > cache , Operation < K , V , R > operation , Flag ... allFlags ) { } }
Flag [ ] attemptFlags = null ; // attemptFlags = allFlags - Flag . FAIL _ SILENTLY if ( ( allFlags != null ) && ( allFlags . length > 0 ) ) { Set < Flag > flags = EnumSet . noneOf ( Flag . class ) ; flags . addAll ( Arrays . asList ( allFlags ) ) ; flags . remove ( Flag . FAIL_SILENTLY ) ; attemptFlags = flags . toArray ( new Flag [ flags . size ( ) ] ) ; } Exception exception = null ; for ( int i = 0 ; i <= this . backOffIntervals . length ; ++ i ) { // Make sure Flag . FAIL _ SILENTLY , if specified , is applied to the last try only try { return this . invoker . invoke ( cache , operation , ( i < this . backOffIntervals . length ) ? attemptFlags : allFlags ) ; } catch ( TimeoutException e ) { exception = e ; } if ( i < this . backOffIntervals . length ) { int delay = this . backOffIntervals [ i ] ; try { if ( ROOT_LOGGER . isTraceEnabled ( ) ) { ROOT_LOGGER . tracef ( exception , "Cache operation failed. Retrying in %d ms" , Integer . valueOf ( delay ) ) ; } Thread . sleep ( delay ) ; } catch ( InterruptedException e ) { Thread . currentThread ( ) . interrupt ( ) ; } } } throw MESSAGES . abortingCacheOperation ( exception , this . backOffIntervals . length + 1 ) ;
public class ConnectedComponents { public static void main ( String ... args ) throws Exception { } }
// Checking input parameters final ParameterTool params = ParameterTool . fromArgs ( args ) ; // set up execution environment ExecutionEnvironment env = ExecutionEnvironment . getExecutionEnvironment ( ) ; final int maxIterations = params . getInt ( "iterations" , 10 ) ; // make parameters available in the web interface env . getConfig ( ) . setGlobalJobParameters ( params ) ; // read vertex and edge data DataSet < Long > vertices = getVertexDataSet ( env , params ) ; DataSet < Tuple2 < Long , Long > > edges = getEdgeDataSet ( env , params ) . flatMap ( new UndirectEdge ( ) ) ; // assign the initial components ( equal to the vertex id ) DataSet < Tuple2 < Long , Long > > verticesWithInitialId = vertices . map ( new DuplicateValue < Long > ( ) ) ; // open a delta iteration DeltaIteration < Tuple2 < Long , Long > , Tuple2 < Long , Long > > iteration = verticesWithInitialId . iterateDelta ( verticesWithInitialId , maxIterations , 0 ) ; // apply the step logic : join with the edges , select the minimum neighbor , update if the component of the candidate is smaller DataSet < Tuple2 < Long , Long > > changes = iteration . getWorkset ( ) . join ( edges ) . where ( 0 ) . equalTo ( 0 ) . with ( new NeighborWithComponentIDJoin ( ) ) . groupBy ( 0 ) . aggregate ( Aggregations . MIN , 1 ) . join ( iteration . getSolutionSet ( ) ) . where ( 0 ) . equalTo ( 0 ) . with ( new ComponentIdFilter ( ) ) ; // close the delta iteration ( delta and new workset are identical ) DataSet < Tuple2 < Long , Long > > result = iteration . closeWith ( changes , changes ) ; // emit result if ( params . has ( "output" ) ) { result . writeAsCsv ( params . get ( "output" ) , "\n" , " " ) ; // execute program env . execute ( "Connected Components Example" ) ; } else { System . out . println ( "Printing result to stdout. Use --output to specify output path." ) ; result . print ( ) ; }
public class ModifyRuleRequest { /** * The conditions . Each condition specifies a field name and a single value . * If the field name is < code > host - header < / code > , you can specify a single host name ( for example , my . example . com ) . * A host name is case insensitive , can be up to 128 characters in length , and can contain any of the following * characters . You can include up to three wildcard characters . * < ul > * < li > * A - Z , a - z , 0-9 * < / li > * < li > * < / li > * < li > * * ( matches 0 or more characters ) * < / li > * < li > * ? ( matches exactly 1 character ) * < / li > * < / ul > * If the field name is < code > path - pattern < / code > , you can specify a single path pattern . A path pattern is * case - sensitive , can be up to 128 characters in length , and can contain any of the following characters . You can * include up to three wildcard characters . * < ul > * < li > * A - Z , a - z , 0-9 * < / li > * < li > * < / li > * < li > * & amp ; ( using & amp ; amp ; ) * < / li > * < li > * * ( matches 0 or more characters ) * < / li > * < li > * ? ( matches exactly 1 character ) * < / li > * < / ul > * < b > NOTE : < / b > This method appends the values to the existing list ( if any ) . Use * { @ link # setConditions ( java . util . Collection ) } or { @ link # withConditions ( java . util . Collection ) } if you want to * override the existing values . * @ param conditions * The conditions . Each condition specifies a field name and a single value . < / p > * If the field name is < code > host - header < / code > , you can specify a single host name ( for example , * my . example . com ) . A host name is case insensitive , can be up to 128 characters in length , and can contain * any of the following characters . You can include up to three wildcard characters . * < ul > * < li > * A - Z , a - z , 0-9 * < / li > * < li > * < / li > * < li > * ( matches 0 or more characters ) * < / li > * < li > * ? ( matches exactly 1 character ) * < / li > * < / ul > * If the field name is < code > path - pattern < / code > , you can specify a single path pattern . A path pattern is * case - sensitive , can be up to 128 characters in length , and can contain any of the following characters . * You can include up to three wildcard characters . * < ul > * < li > * A - Z , a - z , 0-9 * < / li > * < li > * < / li > * < li > * & amp ; ( using & amp ; amp ; ) * < / li > * < li > * ( matches 0 or more characters ) * < / li > * < li > * ? ( matches exactly 1 character ) * < / li > * @ return Returns a reference to this object so that method calls can be chained together . */ public ModifyRuleRequest withConditions ( RuleCondition ... conditions ) { } }
if ( this . conditions == null ) { setConditions ( new java . util . ArrayList < RuleCondition > ( conditions . length ) ) ; } for ( RuleCondition ele : conditions ) { this . conditions . add ( ele ) ; } return this ;
public class TagUtils { /** * String - - > long * @ param value * @ return */ public static float getFloat ( Object value ) { } }
if ( value == null ) { return 0 ; } return Float . valueOf ( value . toString ( ) ) . floatValue ( ) ;
public class PlayJUnit4Provider { /** * Copy of AbstractPlayMojo . getPlayHome ( ) method ( with getCanonicalPath ( ) changed to getAbsolutePath ( ) ) */ private File getPlayHome ( File applicationPath ) throws TestSetFailedException { } }
File targetDir = new File ( applicationPath , "target" ) ; File playTmpDir = new File ( targetDir , "play" ) ; File playTmpHomeDir = new File ( playTmpDir , "home" ) ; if ( ! playTmpHomeDir . exists ( ) ) { throw new TestSetFailedException ( String . format ( "Play! home directory \"%s\" does not exist" , playTmpHomeDir . getAbsolutePath ( ) ) ) ; } if ( ! playTmpHomeDir . isDirectory ( ) ) { throw new TestSetFailedException ( String . format ( "Play! home directory \"%s\" is not a directory" , playTmpHomeDir . getAbsolutePath ( ) ) ) ; } // Additional check whether the temporary Play ! home directory is created by this plugin File warningFile = new File ( playTmpHomeDir , "WARNING.txt" ) ; if ( warningFile . exists ( ) ) { if ( ! warningFile . isFile ( ) ) { throw new TestSetFailedException ( String . format ( "Play! home directory warning file \"%s\" is not a file" , warningFile . getAbsolutePath ( ) ) ) ; } } else { throw new TestSetFailedException ( String . format ( "Play! home directory warning file \"%s\" does not exist" , warningFile . getAbsolutePath ( ) ) ) ; } return playTmpHomeDir ;
public class TypicalFaihyApiFailureHook { protected FaihyFailureErrorPart createSimpleError ( String field , String code , String serverManaged ) { } }
return newFailureErrorPart ( field , code , Collections . emptyMap ( ) , serverManaged ) ;
public class AmazonPinpointEmailClient { /** * Delete a dedicated IP pool . * @ param deleteDedicatedIpPoolRequest * A request to delete a dedicated IP pool . * @ return Result of the DeleteDedicatedIpPool operation returned by the service . * @ throws NotFoundException * The resource you attempted to access doesn ' t exist . * @ throws TooManyRequestsException * Too many requests have been made to the operation . * @ throws BadRequestException * The input you provided is invalid . * @ throws ConcurrentModificationException * The resource is being modified by another operation or thread . * @ sample AmazonPinpointEmail . DeleteDedicatedIpPool * @ see < a href = " http : / / docs . aws . amazon . com / goto / WebAPI / pinpoint - email - 2018-07-26 / DeleteDedicatedIpPool " * target = " _ top " > AWS API Documentation < / a > */ @ Override public DeleteDedicatedIpPoolResult deleteDedicatedIpPool ( DeleteDedicatedIpPoolRequest request ) { } }
request = beforeClientExecution ( request ) ; return executeDeleteDedicatedIpPool ( request ) ;
public class SimulatedTaskRunner { /** * Add the specified TaskInProgress to the priority queue of tasks to finish . * @ param tip * @ param umbilicalProtocol */ protected void addTipToFinish ( TaskInProgress tip , TaskUmbilicalProtocol umbilicalProtocol ) { } }
long currentTime = System . currentTimeMillis ( ) ; long finishTime = currentTime + Math . abs ( rand . nextLong ( ) ) % timeToFinishTask ; LOG . info ( "Adding TIP " + tip . getTask ( ) . getTaskID ( ) + " to finishing queue with start time " + currentTime + " and finish time " + finishTime + " (" + ( ( finishTime - currentTime ) / 1000.0 ) + " sec) to thread " + getName ( ) ) ; TipToFinish ttf = new TipToFinish ( tip , finishTime , umbilicalProtocol ) ; tipQueue . put ( ttf ) ; // Interrupt the waiting thread . We could put in additional logic to only // interrupt when necessary , but probably not worth the complexity . this . interrupt ( ) ;
public class DataSink { /** * Sorts each local partition of a { @ link org . apache . flink . api . java . tuple . Tuple } data set * on the specified field in the specified { @ link Order } before it is emitted by the output format . * < p > < b > Note : Only tuple data sets can be sorted using integer field indices . < / b > * < p > The tuple data set can be sorted on multiple fields in different orders * by chaining { @ link # sortLocalOutput ( int , Order ) } calls . * @ param field The Tuple field on which the data set is locally sorted . * @ param order The Order in which the specified Tuple field is locally sorted . * @ return This data sink operator with specified output order . * @ see org . apache . flink . api . java . tuple . Tuple * @ see Order * @ deprecated Use { @ link DataSet # sortPartition ( int , Order ) } instead */ @ Deprecated @ PublicEvolving public DataSink < T > sortLocalOutput ( int field , Order order ) { } }
// get flat keys Keys . ExpressionKeys < T > ek = new Keys . ExpressionKeys < > ( field , this . type ) ; int [ ] flatKeys = ek . computeLogicalKeyPositions ( ) ; if ( ! Keys . ExpressionKeys . isSortKey ( field , this . type ) ) { throw new InvalidProgramException ( "Selected sort key is not a sortable type" ) ; } if ( this . sortKeyPositions == null ) { // set sorting info this . sortKeyPositions = flatKeys ; this . sortOrders = new Order [ flatKeys . length ] ; Arrays . fill ( this . sortOrders , order ) ; } else { // append sorting info to exising info int oldLength = this . sortKeyPositions . length ; int newLength = oldLength + flatKeys . length ; this . sortKeyPositions = Arrays . copyOf ( this . sortKeyPositions , newLength ) ; this . sortOrders = Arrays . copyOf ( this . sortOrders , newLength ) ; for ( int i = 0 ; i < flatKeys . length ; i ++ ) { this . sortKeyPositions [ oldLength + i ] = flatKeys [ i ] ; this . sortOrders [ oldLength + i ] = order ; } } return this ;
public class ExtjsConditionManager { /** * add each message to MessageBoxFailCondition * @ param messages messages * @ return this */ public ExtjsConditionManager addFailConditions ( String ... messages ) { } }
for ( String message : messages ) { if ( message != null && message . length ( ) > 0 ) { this . add ( new MessageBoxFailCondition ( message ) ) ; } } return this ;
public class CheapIntMap { /** * Returns the number of mappings in this table . */ public int size ( ) { } }
int size = 0 ; for ( int ii = 0 , ll = _keys . length ; ii < ll ; ii ++ ) { if ( _keys [ ii ] != - 1 ) { size ++ ; } } return size ;
public class DistributedLock { /** * 尝试获取锁对象 , 不会阻塞 * @ throws InterruptedException * @ throws KeeperException */ public boolean tryLock ( ) throws KeeperException { } }
// 可能初始化的时候就失败了 if ( exception != null ) { throw exception ; } if ( isOwner ( ) ) { // 锁重入 return true ; } acquireLock ( null ) ; if ( exception != null ) { unlock ( ) ; throw exception ; } if ( interrupt != null ) { unlock ( ) ; Thread . currentThread ( ) . interrupt ( ) ; } if ( other != null ) { unlock ( ) ; throw new NestableRuntimeException ( other ) ; } return isOwner ( ) ;
public class Stock { /** * Requests the historical quotes for this stock with the following characteristics . * < ul > * < li > from : specified value * < li > to : today ( default ) * < li > interval : MONTHLY ( default ) * < / ul > * @ param from start date of the historical data * @ return a list of historical quotes from this stock * @ throws java . io . IOException when there ' s a connection problem * @ see # getHistory ( ) */ public List < HistoricalQuote > getHistory ( Calendar from ) throws IOException { } }
return this . getHistory ( from , HistQuotesRequest . DEFAULT_TO ) ;
public class RBBINode { /** * / CLOVER : OFF */ static void printNode ( RBBINode n ) { } }
if ( n == null ) { System . out . print ( " -- null --\n" ) ; } else { RBBINode . printInt ( n . fSerialNum , 10 ) ; RBBINode . printString ( nodeTypeNames [ n . fType ] , 11 ) ; RBBINode . printInt ( n . fParent == null ? 0 : n . fParent . fSerialNum , 11 ) ; RBBINode . printInt ( n . fLeftChild == null ? 0 : n . fLeftChild . fSerialNum , 11 ) ; RBBINode . printInt ( n . fRightChild == null ? 0 : n . fRightChild . fSerialNum , 12 ) ; RBBINode . printInt ( n . fFirstPos , 12 ) ; RBBINode . printInt ( n . fVal , 7 ) ; if ( n . fType == varRef ) { System . out . print ( " " + n . fText ) ; } } System . out . println ( "" ) ;
public class StorableGenerator { /** * Loads the property value of the current storable onto the stack . If the * property is derived the read method is used , otherwise it just loads the * value from the appropriate field . * entry stack : [ * exit stack : [ value * @ param b - { @ link CodeBuilder } to which to add the load code * @ param property - property to load */ private void loadThisProperty ( CodeBuilder b , StorableProperty property ) { } }
loadThisProperty ( b , property , TypeDesc . forClass ( property . getType ( ) ) ) ;
public class MemcachedBackupSession { /** * { @ inheritDoc } */ @ Override public Object getAttribute ( final String name ) { } }
if ( filterAttribute ( name ) ) { _attributesAccessed = true ; } return super . getAttribute ( name ) ;
public class ReaderFactory { /** * Create a Reader for the given URL . * @ param url * the url , not null * @ return the reader , never null * @ throws IOException * If any I / O error occur on reading the URL . */ public Reader create ( URL url ) throws IOException { } }
return new InputStreamReader ( openStream ( url ) , StandardCharsets . UTF_8 ) ;
public class ClientUtils { /** * Method posts request payload * @ param request * @ param payload * @ return */ protected HttpResponse sendPayload ( HttpEntityEnclosingRequestBase request , byte [ ] payload , HttpHost proxy ) { } }
HttpResponse response = null ; try { HttpClient httpclient = new DefaultHttpClient ( ) ; if ( proxy != null ) { httpclient . getParams ( ) . setParameter ( ConnRoutePNames . DEFAULT_PROXY , proxy ) ; } request . setEntity ( new ByteArrayEntity ( payload ) ) ; log ( request ) ; response = httpclient . execute ( request ) ; } catch ( IOException ioe ) { throw new EFhirClientException ( "Error sending HTTP Post/Put Payload" , ioe ) ; } return response ;
public class PrometheusBuilder { /** * Create the Prometheus metric name by sanitizing some characters */ private static String getPrometheusMetricName ( String name ) { } }
String out = name . replaceAll ( "(?<!^|:)(\\p{Upper})(?=\\p{Lower})" , "_$1" ) ; out = out . replaceAll ( "(?<=\\p{Lower})(\\p{Upper})" , "_$1" ) . toLowerCase ( ) ; out = out . replaceAll ( "[-_.\\s]+" , "_" ) ; out = out . replaceAll ( "^_*(.*?)_*$" , "$1" ) ; return out ;
public class VoltZK { /** * get MigratePartitionLeader information */ public static MigratePartitionLeaderInfo getMigratePartitionLeaderInfo ( ZooKeeper zk ) { } }
try { byte [ ] data = zk . getData ( migrate_partition_leader_info , null , null ) ; if ( data != null ) { MigratePartitionLeaderInfo info = new MigratePartitionLeaderInfo ( data ) ; return info ; } } catch ( KeeperException | InterruptedException | JSONException e ) { } return null ;
public class MathPlag { /** * Compare two MathML formulas . The return value is a map of similarity factors like matching depth , * element coverage , indicator for structural or data match and if the comparison formula holds an * equation . * @ param refMathML Reference MathML string ( must contain pMML and cMML ) * @ param compMathML Comparison MathML string ( must contain pMML and cMML ) * @ return map of all found factors ( depth , coverage , structureMatch , dataMatch , isEquation ) * @ throws XPathExpressionException could hint towards a bug */ public static Map < String , Object > compareOriginalFactors ( String refMathML , String compMathML ) throws XPathExpressionException { } }
try { CMMLInfo refDoc = new CMMLInfo ( refMathML ) ; CMMLInfo compDoc = new CMMLInfo ( compMathML ) ; // compute factors final Integer depth = compDoc . getDepth ( refDoc . getXQuery ( ) ) ; final Double coverage = compDoc . getCoverage ( refDoc . getElements ( ) ) ; Boolean formula = compDoc . isEquation ( true ) ; Boolean structMatch = compDoc . toStrictCmml ( ) . abstract2CDs ( ) . isMatch ( refDoc . toStrictCmml ( ) . abstract2CDs ( ) . getXQuery ( ) ) ; Boolean dataMatch = new CMMLInfo ( compMathML ) . toStrictCmml ( ) . abstract2DTs ( ) . isMatch ( new CMMLInfo ( refMathML ) . toStrictCmml ( ) . abstract2DTs ( ) . getXQuery ( ) ) ; HashMap < String , Object > result = new HashMap < > ( ) ; result . put ( "depth" , depth ) ; result . put ( "coverage" , coverage ) ; result . put ( "structureMatch" , structMatch ) ; result . put ( "dataMatch" , dataMatch ) ; result . put ( "isEquation" , formula ) ; return result ; } catch ( Exception e ) { // log and throw in this case logger . error ( String . format ( "mathml comparison failed (refMathML: %s) (compMathML: %s)" , refMathML , compMathML ) , e ) ; throw e ; }
public class ListUtils { /** * 过滤 */ public static < E > List < E > filter ( final List < E > list , Filter < E > filter ) { } }
List < E > newList = new ArrayList < E > ( ) ; if ( list != null && list . size ( ) != 0 ) { for ( E e : list ) { if ( filter . filter ( e ) ) { newList . add ( e ) ; } } } return newList ;
public class Ifc2x3tc1PackageImpl { /** * < ! - - begin - user - doc - - > * < ! - - end - user - doc - - > * @ generated */ public EClass getIfcElectricHeaterType ( ) { } }
if ( ifcElectricHeaterTypeEClass == null ) { ifcElectricHeaterTypeEClass = ( EClass ) EPackage . Registry . INSTANCE . getEPackage ( Ifc2x3tc1Package . eNS_URI ) . getEClassifiers ( ) . get ( 190 ) ; } return ifcElectricHeaterTypeEClass ;
public class HelpView { /** * Retrieves the help topic associated with the given node . Note that this also maintains a map * of topic id - - & gt ; topic mappings in the parent help set . * @ param node Tree node . * @ return A help topic instance . */ protected HelpTopic getTopic ( TreeNode node ) { } }
try { DefaultMutableTreeNode nd = ( DefaultMutableTreeNode ) node ; TreeItem item = ( TreeItem ) nd . getUserObject ( ) ; ID id = item . getID ( ) ; HelpTopic topic = new HelpTopic ( id == null ? null : id . getURL ( ) , item . getName ( ) , view . getHelpSet ( ) . getTitle ( ) ) ; if ( id != null && view . getHelpSet ( ) . getKeyData ( "topics" , id . id ) == null ) { view . getHelpSet ( ) . setKeyData ( "topics" , id . id , topic ) ; } return topic ; } catch ( MalformedURLException e ) { return null ; }
public class CoronaJobTracker { @ Override public void grantResource ( String handle , List < ResourceGrant > granted ) { } }
String msg = "Received " + granted . size ( ) + " new grants " ; if ( LOG . isDebugEnabled ( ) ) { LOG . debug ( msg + granted . toString ( ) ) ; } else { LOG . info ( msg ) ; } // This is unnecessary , but nice error messages look better than NPEs if ( resourceTracker != null ) { resourceTracker . addNewGrants ( granted ) ; } else { LOG . error ( "Grant received but ResourceTracker was uninitialized." ) ; }
public class GroupTemplate { /** * 得到模板 , 并指明父模板 * @ param key * @ param parent * @ return */ public Template getTemplate ( String key , String parent , ResourceLoader loader ) { } }
Template template = this . getTemplate ( key , loader ) ; template . isRoot = false ; return template ;
public class Ix { /** * Emits a range of characters from the given CharSequence as integer values . * The result ' s iterator ( ) doesn ' t support remove ( ) . * @ param cs the source character sequence , not null * @ param start the start character index , inclusive , non - negative * @ param end the end character index , exclusive , non - negative * @ return the new Ix instance * @ throws NullPointerException if cs is null * @ throws IndexOutOfBoundsException if start is out of range [ 0 , cs . length ] * @ since 1.0 */ public static Ix < Integer > characters ( CharSequence cs , int start , int end ) { } }
int len = cs . length ( ) ; if ( start < 0 || end < 0 || start > len || end > len ) { throw new IndexOutOfBoundsException ( "start=" + start + ", end=" + end + ", length=" + len ) ; } return new IxCharacters ( cs , start , end ) ;
public class ContentStoreImpl { /** * { @ inheritDoc } */ @ Override public String copyContent ( final String srcSpaceId , final String srcContentId , final String destStoreId , final String destSpaceId , final String destContentId ) throws ContentStoreException { } }
return execute ( new Retriable ( ) { @ Override public String retry ( ) throws ContentStoreException { // The actual method being executed return doCopyContent ( srcSpaceId , srcContentId , destStoreId , destSpaceId , destContentId ) ; } } ) ;
public class UAgentInfo { /** * The longer and more thorough way to detect for a mobile device . * Will probably detect most feature phones , * smartphone - class devices , Internet Tablets , * Internet - enabled game consoles , etc . * This ought to catch a lot of the more obscure and older devices , also - - * but no promises on thoroughness ! * @ return detection of any mobile device using the more thorough method */ public boolean detectMobileLong ( ) { } }
if ( detectMobileQuick ( ) || detectGameConsole ( ) ) { return true ; } if ( detectDangerHiptop ( ) || detectMaemoTablet ( ) || detectSonyMylo ( ) || detectArchos ( ) ) { return true ; } if ( ( userAgent . indexOf ( devicePda ) != - 1 ) && ( userAgent . indexOf ( disUpdate ) < 0 ) ) // no index found { return true ; } // Detect older phones from certain manufacturers and operators . if ( ( userAgent . indexOf ( uplink ) != - 1 ) || ( userAgent . indexOf ( engineOpenWeb ) != - 1 ) || ( userAgent . indexOf ( manuSamsung1 ) != - 1 ) || ( userAgent . indexOf ( manuSonyEricsson ) != - 1 ) || ( userAgent . indexOf ( manuericsson ) != - 1 ) || ( userAgent . indexOf ( svcDocomo ) != - 1 ) || ( userAgent . indexOf ( svcKddi ) != - 1 ) || ( userAgent . indexOf ( svcVodafone ) != - 1 ) ) { return true ; } return false ;
public class AbstractExtensionFinder { /** * Returns the parameters of an { @ link Extension } annotation without loading * the corresponding class into the class loader . * @ param className name of the class , that holds the requested { @ link Extension } annotation * @ param classLoader class loader to access the class * @ return the contents of the { @ link Extension } annotation or null , if the class does not * have an { @ link Extension } annotation */ private ExtensionInfo getExtensionInfo ( String className , ClassLoader classLoader ) { } }
if ( extensionInfos == null ) { extensionInfos = new HashMap < > ( ) ; } if ( ! extensionInfos . containsKey ( className ) ) { log . trace ( "Load annotation for '{}' using asm" , className ) ; ExtensionInfo info = ExtensionInfo . load ( className , classLoader ) ; if ( info == null ) { log . warn ( "No extension annotation was found for '{}'" , className ) ; extensionInfos . put ( className , null ) ; } else { extensionInfos . put ( className , info ) ; } } return extensionInfos . get ( className ) ;
public class AppServicePlansInner { /** * Get all apps associated with an App Service plan . * Get all apps associated with an App Service plan . * @ param nextPageLink The NextLink from the previous successful call to List operation . * @ throws IllegalArgumentException thrown if parameters fail the validation * @ return the observable to the PagedList & lt ; SiteInner & gt ; object */ public Observable < Page < SiteInner > > listWebAppsNextAsync ( final String nextPageLink ) { } }
return listWebAppsNextWithServiceResponseAsync ( nextPageLink ) . map ( new Func1 < ServiceResponse < Page < SiteInner > > , Page < SiteInner > > ( ) { @ Override public Page < SiteInner > call ( ServiceResponse < Page < SiteInner > > response ) { return response . body ( ) ; } } ) ;
public class AutoscalePolicyFilter { /** * Method allow to find autoscale policies by its names * Filtering is case sensitive . * @ param names is a set of names * @ return { @ link AutoscalePolicyFilter } */ public AutoscalePolicyFilter names ( String ... names ) { } }
allItemsNotNull ( names , "Autoscale policies names" ) ; predicate = predicate . and ( combine ( AutoscalePolicyMetadata :: getName , in ( names ) ) ) ; return this ;
public class ActivitySorter { /** * Recursively sort the supplied child tasks . * @ param container child tasks */ public void sort ( ChildTaskContainer container ) { } }
// Do we have any tasks ? List < Task > tasks = container . getChildTasks ( ) ; if ( ! tasks . isEmpty ( ) ) { for ( Task task : tasks ) { // Sort child activities sort ( task ) ; // Sort Order : // 1 . Activities come first // 2 . WBS come last // 3 . Activities ordered by activity ID // 4 . WBS ordered by ID Collections . sort ( tasks , new Comparator < Task > ( ) { @ Override public int compare ( Task t1 , Task t2 ) { boolean t1IsWbs = m_wbsTasks . contains ( t1 ) ; boolean t2IsWbs = m_wbsTasks . contains ( t2 ) ; // Both are WBS if ( t1IsWbs && t2IsWbs ) { return t1 . getID ( ) . compareTo ( t2 . getID ( ) ) ; } // Both are activities if ( ! t1IsWbs && ! t2IsWbs ) { String activityID1 = ( String ) t1 . getCurrentValue ( m_activityIDField ) ; String activityID2 = ( String ) t2 . getCurrentValue ( m_activityIDField ) ; if ( activityID1 == null || activityID2 == null ) { return ( activityID1 == null && activityID2 == null ? 0 : ( activityID1 == null ? 1 : - 1 ) ) ; } return activityID1 . compareTo ( activityID2 ) ; } // One activity one WBS return t1IsWbs ? 1 : - 1 ; } } ) ; } }
public class EventTimeBasedIndexNameBuilder { /** * Gets the name of the index to use for an index request * @ param event * Event for which the name of index has to be prepared * @ return index name of the form ' indexPrefix - formattedTimestamp ' */ @ Override public String getIndexName ( Event event ) { } }
String realIndexPrefix = BucketPath . escapeString ( event . getIndexPrefix ( ) != null ? event . getIndexPrefix ( ) : indexPrefix , event . getHeaders ( ) ) ; if ( event . getIndexTimestamp ( ) != null ) { String indexName = new StringBuilder ( realIndexPrefix ) . append ( '-' ) . append ( event . getIndexTimestamp ( ) ) . toString ( ) ; if ( logger . isDebugEnabled ( ) ) logger . debug ( "Index Name = " + indexName ) ; return indexName ; } else { TimestampedEvent timestampedEvent = new TimestampedEvent ( event ) ; long timestamp = timestampedEvent . getTimestamp ( ) ; String indexName = new StringBuilder ( realIndexPrefix ) . append ( '-' ) . append ( fastDateFormat . format ( timestamp ) ) . toString ( ) ; if ( logger . isDebugEnabled ( ) ) logger . debug ( "Index Name = " + indexName ) ; return indexName ; }
public class PolicyEventsInner { /** * Gets OData metadata XML document . * @ param scope A valid scope , i . e . management group , subscription , resource group , or resource ID . Scope used has no effect on metadata returned . * @ throws IllegalArgumentException thrown if parameters fail the validation * @ return the observable to the String object */ public Observable < String > getMetadataAsync ( String scope ) { } }
return getMetadataWithServiceResponseAsync ( scope ) . map ( new Func1 < ServiceResponse < String > , String > ( ) { @ Override public String call ( ServiceResponse < String > response ) { return response . body ( ) ; } } ) ;
public class HelloSignClient { /** * Returns true if an account exists with the provided email address . Note * this is limited to the visibility of the currently authenticated user . * @ param email String email address * @ return true if the account exists , false otherwise * @ throws HelloSignException Thrown if there ' s a problem communicating with * the HelloSign API . */ public boolean isAccountValid ( String email ) throws HelloSignException { } }
if ( email == null || email . isEmpty ( ) ) { return false ; } Account account = new Account ( httpClient . withAuth ( auth ) . withPostField ( Account . ACCOUNT_EMAIL_ADDRESS , email ) . post ( BASE_URI + VALIDATE_ACCOUNT_URI ) . asJson ( ) ) ; return ( account . hasEmail ( ) && email . equalsIgnoreCase ( account . getEmail ( ) ) ) ;
public class ListContainersResult { /** * The names of the containers . * @ param containers * The names of the containers . */ public void setContainers ( java . util . Collection < Container > containers ) { } }
if ( containers == null ) { this . containers = null ; return ; } this . containers = new java . util . ArrayList < Container > ( containers ) ;
public class ServerFactory { /** * Allows the creation of a Server instance with various configurations * @ return a Server instance */ public static Server createServer ( ServerID serverID , io . grpc . Server rpcServer , LockManager lockManager , AttributeDeduplicatorDaemon attributeDeduplicatorDaemon , KeyspaceManager keyspaceStore ) { } }
Server server = new Server ( serverID , lockManager , rpcServer , attributeDeduplicatorDaemon , keyspaceStore ) ; Runtime . getRuntime ( ) . addShutdownHook ( new Thread ( server :: close , "grakn-server-shutdown" ) ) ; return server ;
public class CommercePriceListAccountRelPersistenceImpl { /** * Removes all the commerce price list account rels where uuid = & # 63 ; from the database . * @ param uuid the uuid */ @ Override public void removeByUuid ( String uuid ) { } }
for ( CommercePriceListAccountRel commercePriceListAccountRel : findByUuid ( uuid , QueryUtil . ALL_POS , QueryUtil . ALL_POS , null ) ) { remove ( commercePriceListAccountRel ) ; }
public class NFVORequestor { /** * Returns an EventAgent with which requests regarding Events can be sent to the NFVO . * @ return an EventAgent */ public synchronized EventAgent getEventAgent ( ) { } }
if ( this . eventAgent == null ) { if ( isService ) { this . eventAgent = new EventAgent ( this . serviceName , this . projectId , this . sslEnabled , this . nfvoIp , this . nfvoPort , this . version , this . serviceKey ) ; } else { this . eventAgent = new EventAgent ( this . username , this . password , this . projectId , this . sslEnabled , this . nfvoIp , this . nfvoPort , this . version ) ; } } return this . eventAgent ;
public class Interpolationd { /** * Compute the interpolation factors < code > ( t0 , t1 , t2 ) < / code > in order to interpolate an arbitrary value over a given * triangle at the given point < code > ( x , y ) < / code > . * This method takes in the 2D vertex positions of the three vertices of a triangle and stores in < code > dest < / code > the * factors < code > ( t0 , t1 , t2 ) < / code > in the equation < code > v ' = v0 * t0 + v1 * t1 + v2 * t2 < / code > where < code > ( v0 , v1 , v2 ) < / code > are * arbitrary ( scalar or vector ) values associated with the respective vertices of the triangle . The computed value < code > v ' < / code > * is the interpolated value at the given position < code > ( x , y ) < / code > . * @ param v0X * the x coordinate of the first triangle vertex * @ param v0Y * the y coordinate of the first triangle vertex * @ param v1X * the x coordinate of the second triangle vertex * @ param v1Y * the y coordinate of the second triangle vertex * @ param v2X * the x coordinate of the third triangle vertex * @ param v2Y * the y coordinate of the third triangle vertex * @ param x * the x coordinate of the point to interpolate at * @ param y * the y coordinate of the point to interpolate at * @ param dest * will hold the interpolation factors < code > ( t0 , t1 , t2 ) < / code > * @ return dest */ public static Vector3d interpolationFactorsTriangle ( double v0X , double v0Y , double v1X , double v1Y , double v2X , double v2Y , double x , double y , Vector3d dest ) { } }
double v12Y = v1Y - v2Y ; double v21X = v2X - v1X ; double v02X = v0X - v2X ; double yv2Y = y - v2Y ; double xv2X = x - v2X ; double v02Y = v0Y - v2Y ; double invDen = 1.0 / ( v12Y * v02X + v21X * v02Y ) ; dest . x = ( v12Y * xv2X + v21X * yv2Y ) * invDen ; dest . y = ( v02X * yv2Y - v02Y * xv2X ) * invDen ; dest . z = 1.0 - dest . x - dest . y ; return dest ;
public class SpringUtil { /** * Returns an array of resources matching the location pattern . * @ param locationPattern The location pattern . Supports classpath references . * @ return Array of matching resources . */ public static Resource [ ] getResources ( String locationPattern ) { } }
try { return resolver . getResources ( locationPattern ) ; } catch ( IOException e ) { throw MiscUtil . toUnchecked ( e ) ; }
public class AfplibPackageImpl { /** * < ! - - begin - user - doc - - > * < ! - - end - user - doc - - > * @ generated */ public EEnum getFNCFNIRGLen ( ) { } }
if ( fncfnirgLenEEnum == null ) { fncfnirgLenEEnum = ( EEnum ) EPackage . Registry . INSTANCE . getEPackage ( AfplibPackage . eNS_URI ) . getEClassifiers ( ) . get ( 25 ) ; } return fncfnirgLenEEnum ;
public class FunctionalUtils { /** * Group the input pairs by the key of each pair . * @ param listInput the list of pairs to group * @ param < K > the key type * @ param < V > the value type * @ return a map representing a grouping of the * keys by the given input key type and list of values * in the grouping . */ public static < K , V > Map < K , List < V > > groupByKey ( List < Pair < K , V > > listInput ) { } }
Map < K , List < V > > ret = new HashMap < > ( ) ; for ( Pair < K , V > pair : listInput ) { List < V > currList = ret . get ( pair . getFirst ( ) ) ; if ( currList == null ) { currList = new ArrayList < > ( ) ; ret . put ( pair . getFirst ( ) , currList ) ; } currList . add ( pair . getSecond ( ) ) ; } return ret ;
public class CloudSdk { /** * https : / / github . com / GoogleCloudPlatform / appengine - plugins - core / issues / 189 */ @ VisibleForTesting Path getWindowsPythonPath ( ) { } }
String cloudSdkPython = System . getenv ( "CLOUDSDK_PYTHON" ) ; if ( cloudSdkPython != null ) { Path cloudSdkPythonPath = Paths . get ( cloudSdkPython ) ; if ( Files . exists ( cloudSdkPythonPath ) ) { return cloudSdkPythonPath ; } else { throw new InvalidPathException ( cloudSdkPython , "python binary not in specified location" ) ; } } Path pythonPath = getPath ( ) . resolve ( WINDOWS_BUNDLED_PYTHON ) ; if ( Files . exists ( pythonPath ) ) { return pythonPath ; } else { return Paths . get ( "python" ) ; }
public class Timebase { /** * Convert a sample count from one timebase to another < br / > * Note that this may result in data loss due to rounding . * @ param samples * @ param oldRate * @ param failOnPrecisionLoss * if true , precision losing operations will fail by throwing a PrecisionLostException * @ return */ public long resample ( final long samples , final Timebase oldRate , boolean failOnPrecisionLoss ) throws ResamplingException { } }
final double resampled = resample ( ( double ) samples , oldRate ) ; final double rounded = Math . round ( resampled ) ; // Warn about significant loss of precision if ( resampled != rounded && Math . abs ( rounded - resampled ) > 0.000001 ) { if ( failOnPrecisionLoss ) { throw new ResamplingException ( "Resample " + samples + " from " + oldRate + " to " + this + " would lose precision by rounding " + resampled ) ; } else { if ( WARN_ON_PRECISION_LOSS ) log . warn ( "Resample operation lost precision: " + samples + " from " + oldRate + " to " + this + " produced " + resampled + " which will be rounded to " + rounded ) ; } } return ( long ) rounded ;
public class LogDecoder { /** * Decoding an event from binary - log buffer . * @ return < code > UknownLogEvent < / code > if event type is unknown or skipped , * < code > null < / code > if buffer is not including a full event . */ public LogEvent decode ( LogBuffer buffer , LogContext context ) throws IOException { } }
final int limit = buffer . limit ( ) ; if ( limit >= FormatDescriptionLogEvent . LOG_EVENT_HEADER_LEN ) { LogHeader header = new LogHeader ( buffer , context . getFormatDescription ( ) ) ; final int len = header . getEventLen ( ) ; if ( limit >= len ) { LogEvent event ; /* Checking binary - log ' s header */ if ( handleSet . get ( header . getType ( ) ) ) { buffer . limit ( len ) ; try { /* Decoding binary - log to event */ event = decode ( buffer , header , context ) ; } catch ( IOException e ) { if ( logger . isWarnEnabled ( ) ) { logger . warn ( "Decoding " + LogEvent . getTypeName ( header . getType ( ) ) + " failed from: " + context . getLogPosition ( ) , e ) ; } throw e ; } finally { buffer . limit ( limit ) ; /* Restore limit */ } } else { /* Ignore unsupported binary - log . */ event = new UnknownLogEvent ( header ) ; } if ( event != null ) { // set logFileName event . getHeader ( ) . setLogFileName ( context . getLogPosition ( ) . getFileName ( ) ) ; event . setSemival ( buffer . semival ) ; } /* consume this binary - log . */ buffer . consume ( len ) ; return event ; } } /* Rewind buffer ' s position to 0. */ buffer . rewind ( ) ; return null ;
public class ParameterUtil { /** * Init parameter values map with the default values * @ param param parameter * @ param defValues default values * @ param parameterValues map of parameter values * @ throws QueryException if could not get default parameter values */ public static void initDefaultParameterValues ( QueryParameter param , List < Serializable > defValues , Map < String , Object > parameterValues ) throws QueryException { } }
if ( param . getSelection ( ) . equals ( QueryParameter . SINGLE_SELECTION ) ) { parameterValues . put ( param . getName ( ) , defValues . get ( 0 ) ) ; } else { Object [ ] val = new Object [ defValues . size ( ) ] ; for ( int k = 0 , size = defValues . size ( ) ; k < size ; k ++ ) { val [ k ] = defValues . get ( k ) ; } parameterValues . put ( param . getName ( ) , val ) ; }
public class WmsServerExtension { /** * Create a new WMS layer . This layer extends the default { @ link org . geomajas . gwt2 . plugin . wms . client . layer . WmsLayer } * by supporting GetFeatureInfo calls . * @ param title The layer title . * @ param crs The CRS for this layer . * @ param tileConfig The tile configuration object . * @ param layerConfig The layer configuration object . * @ param layerInfo The layer info object . Acquired from a WMS GetCapabilities . This is optional . * @ param wfsConfig The WFS configuration . * @ return A new WMS layer . */ public FeatureSearchSupportedWmsServerLayer createLayer ( String title , String crs , TileConfiguration tileConfig , WmsLayerConfiguration layerConfig , WmsLayerInfo layerInfo , WfsFeatureTypeDescriptionInfo wfsConfig ) { } }
return new FeatureSearchSupportedWmsServerLayer ( title , crs , layerConfig , tileConfig , layerInfo , wfsConfig ) ;
public class Transaction { /** * Adds a new and fully signed input for the given parameters . Note that this method is < b > not < / b > thread safe * and requires external synchronization . Please refer to general documentation on Bitcoin scripting and contracts * to understand the values of sigHash and anyoneCanPay : otherwise you can use the other form of this method * that sets them to typical defaults . * @ throws ScriptException if the scriptPubKey is not a pay to address or pay to pubkey script . */ public TransactionInput addSignedInput ( TransactionOutPoint prevOut , Script scriptPubKey , ECKey sigKey , SigHash sigHash , boolean anyoneCanPay ) throws ScriptException { } }
// Verify the API user didn ' t try to do operations out of order . checkState ( ! outputs . isEmpty ( ) , "Attempting to sign tx without outputs." ) ; TransactionInput input = new TransactionInput ( params , this , new byte [ ] { } , prevOut ) ; addInput ( input ) ; int inputIndex = inputs . size ( ) - 1 ; if ( ScriptPattern . isP2PK ( scriptPubKey ) ) { TransactionSignature signature = calculateSignature ( inputIndex , sigKey , scriptPubKey , sigHash , anyoneCanPay ) ; input . setScriptSig ( ScriptBuilder . createInputScript ( signature ) ) ; input . setWitness ( null ) ; } else if ( ScriptPattern . isP2PKH ( scriptPubKey ) ) { TransactionSignature signature = calculateSignature ( inputIndex , sigKey , scriptPubKey , sigHash , anyoneCanPay ) ; input . setScriptSig ( ScriptBuilder . createInputScript ( signature , sigKey ) ) ; input . setWitness ( null ) ; } else if ( ScriptPattern . isP2WPKH ( scriptPubKey ) ) { Script scriptCode = new ScriptBuilder ( ) . data ( ScriptBuilder . createOutputScript ( LegacyAddress . fromKey ( params , sigKey ) ) . getProgram ( ) ) . build ( ) ; TransactionSignature signature = calculateWitnessSignature ( inputIndex , sigKey , scriptCode , input . getValue ( ) , sigHash , anyoneCanPay ) ; input . setScriptSig ( ScriptBuilder . createEmpty ( ) ) ; input . setWitness ( TransactionWitness . redeemP2WPKH ( signature , sigKey ) ) ; } else { throw new ScriptException ( ScriptError . SCRIPT_ERR_UNKNOWN_ERROR , "Don't know how to sign for this kind of scriptPubKey: " + scriptPubKey ) ; } return input ;
public class Leader { /** * Finds an epoch number which is higher than any proposed epoch in quorum * set and propose the epoch to them . * @ throws IOException in case of IO failure . */ void proposeNewEpoch ( ) throws IOException { } }
long maxEpoch = persistence . getProposedEpoch ( ) ; int maxSyncTimeoutMs = getSyncTimeoutMs ( ) ; for ( PeerHandler ph : this . quorumMap . values ( ) ) { if ( ph . getLastProposedEpoch ( ) > maxEpoch ) { maxEpoch = ph . getLastProposedEpoch ( ) ; } if ( ph . getSyncTimeoutMs ( ) > maxSyncTimeoutMs ) { maxSyncTimeoutMs = ph . getSyncTimeoutMs ( ) ; } } // The new epoch number should be larger than any follower ' s epoch . long newEpoch = maxEpoch + 1 ; // Updates leader ' s last proposed epoch . persistence . setProposedEpoch ( newEpoch ) ; // Updates leader ' s sync timeout to the largest timeout found in the quorum . setSyncTimeoutMs ( maxSyncTimeoutMs ) ; LOG . debug ( "Begins proposing new epoch {} with sync timeout {} ms" , newEpoch , getSyncTimeoutMs ( ) ) ; // Sends new epoch message to quorum . broadcast ( this . quorumMap . keySet ( ) . iterator ( ) , MessageBuilder . buildNewEpochMessage ( newEpoch , getSyncTimeoutMs ( ) ) ) ;
public class CodeSigningMarshaller { /** * Marshall the given parameter object . */ public void marshall ( CodeSigning codeSigning , ProtocolMarshaller protocolMarshaller ) { } }
if ( codeSigning == null ) { throw new SdkClientException ( "Invalid argument passed to marshall(...)" ) ; } try { protocolMarshaller . marshall ( codeSigning . getAwsSignerJobId ( ) , AWSSIGNERJOBID_BINDING ) ; protocolMarshaller . marshall ( codeSigning . getStartSigningJobParameter ( ) , STARTSIGNINGJOBPARAMETER_BINDING ) ; protocolMarshaller . marshall ( codeSigning . getCustomCodeSigning ( ) , CUSTOMCODESIGNING_BINDING ) ; } catch ( Exception e ) { throw new SdkClientException ( "Unable to marshall request to JSON: " + e . getMessage ( ) , e ) ; }
public class Utils { /** * Creates a unique device id . Suppresses ` HardwareIds ` lint warnings as we don ' t use this ID for * identifying specific users . This is also what is required by the Segment spec . */ @ SuppressLint ( "HardwareIds" ) public static String getDeviceId ( Context context ) { } }
String androidId = getString ( context . getContentResolver ( ) , ANDROID_ID ) ; if ( ! isNullOrEmpty ( androidId ) && ! "9774d56d682e549c" . equals ( androidId ) && ! "unknown" . equals ( androidId ) && ! "000000000000000" . equals ( androidId ) ) { return androidId ; } // Serial number , guaranteed to be on all non phones in 2.3 + . if ( ! isNullOrEmpty ( Build . SERIAL ) ) { return Build . SERIAL ; } // Telephony ID , guaranteed to be on all phones , requires READ _ PHONE _ STATE permission if ( hasPermission ( context , READ_PHONE_STATE ) && hasFeature ( context , FEATURE_TELEPHONY ) ) { TelephonyManager telephonyManager = getSystemService ( context , TELEPHONY_SERVICE ) ; @ SuppressLint ( "MissingPermission" ) String telephonyId = telephonyManager . getDeviceId ( ) ; if ( ! isNullOrEmpty ( telephonyId ) ) { return telephonyId ; } } // If this still fails , generate random identifier that does not persist across installations return UUID . randomUUID ( ) . toString ( ) ;
public class WarningPropertyUtil { /** * Get a Location matching the given PC value . Because of JSR subroutines , * there may be multiple Locations referring to the given instruction . This * method simply returns one of them arbitrarily . * @ param classContext * the ClassContext containing the method * @ param method * the method * @ param pc * a PC value of an instruction in the method * @ return a Location corresponding to the PC value , or null if no such * Location can be found * @ throws CFGBuilderException */ private static Location pcToLocation ( ClassContext classContext , Method method , int pc ) throws CFGBuilderException { } }
CFG cfg = classContext . getCFG ( method ) ; for ( Iterator < Location > i = cfg . locationIterator ( ) ; i . hasNext ( ) ; ) { Location location = i . next ( ) ; if ( location . getHandle ( ) . getPosition ( ) == pc ) { return location ; } } return null ;
public class JCuda { /** * Allocate an array on the device . * < pre > * cudaError _ t cudaMalloc3DArray ( * cudaArray _ t * array , * const cudaChannelFormatDesc * desc , * cudaExtent extent , * unsigned int flags = 0 ) * < / pre > * < div > * < p > Allocate an array on the device . * Allocates a CUDA array according to the cudaChannelFormatDesc structure * < tt > desc < / tt > and returns a handle to the new CUDA array in < tt > * array < / tt > . * < p > The cudaChannelFormatDesc is defined * as : * < pre > struct cudaChannelFormatDesc { * int x , y , z , w ; * enum cudaChannelFormatKind * } ; < / pre > * where cudaChannelFormatKind is one of * cudaChannelFormatKindSigned , cudaChannelFormatKindUnsigned , or * cudaChannelFormatKindFloat . * < p > cudaMalloc3DArray ( ) can allocate the * following : * < ul > * < li > * < p > A 1D array is allocated if the * height and depth extents are both zero . * < / li > * < li > * < p > A 2D array is allocated if only * the depth extent is zero . * < / li > * < li > * < p > A 3D array is allocated if all * three extents are non - zero . * < / li > * < li > * < p > A 1D layered CUDA array is * allocated if only the height extent is zero and the cudaArrayLayered * flag is set . Each layer is * a 1D array . The number of layers * is determined by the depth extent . * < / li > * < li > * < p > A 2D layered CUDA array is * allocated if all three extents are non - zero and the cudaArrayLayered * flag is set . Each layer is * a 2D array . The number of layers * is determined by the depth extent . * < / li > * < li > * < p > A cubemap CUDA array is * allocated if all three extents are non - zero and the cudaArrayCubemap * flag is set . Width must be equal * to height , and depth must be * six . A cubemap is a special type of 2D layered CUDA array , where the * six layers represent the * six faces of a cube . The order * of the six layers in memory is the same as that listed in * cudaGraphicsCubeFace . * < / li > * < li > * < p > A cubemap layered CUDA array * is allocated if all three extents are non - zero , and both , cudaArrayCubemap * and cudaArrayLayered * flags are set . Width must be * equal to height , and depth must be a multiple of six . A cubemap layered * CUDA array is a special * type of 2D layered CUDA array * that consists of a collection of cubemaps . The first six layers * represent the first cubemap , * the next six layers form the * second cubemap , and so on . * < / li > * < / ul > * < p > The < tt > flags < / tt > parameter enables * different options to be specified that affect the allocation , as * follows . * < ul > * < li > * < p > cudaArrayDefault : This flag ' s * value is defined to be 0 and provides default array allocation * < / li > * < li > * < p > cudaArrayLayered : Allocates a * layered CUDA array , with the depth extent indicating the number of * layers * < / li > * < li > * < p > cudaArrayCubemap : Allocates a * cubemap CUDA array . Width must be equal to height , and depth must be * six . If the cudaArrayLayered flag is also * set , depth must be a multiple * of six . * < / li > * < li > * < p > cudaArraySurfaceLoadStore : * Allocates a CUDA array that could be read from or written to using a * surface reference . * < / li > * < li > * < p > cudaArrayTextureGather : This * flag indicates that texture gather operations will be performed on the * CUDA array . Texture gather can only be performed * on 2D CUDA arrays . * < / li > * < / ul > * < p > The width , height and depth extents must * meet certain size requirements as listed in the following table . All * values are specified * in elements . * < p > Note that 2D CUDA arrays have different * size requirements if the cudaArrayTextureGather flag is set . In that * case , the valid range for ( width , height , depth ) is * ( ( 1 , maxTexture2DGather [ 0 ] ) , ( 1 , maxTexture2DGather [ 1 ] ) , * < div > * < table cellpadding = " 4 " cellspacing = " 0 " summary = " " frame = " border " border = " 1 " rules = " all " > * < tbody > * < tr > * < td valign = " top " rowspan = " 1 " colspan = " 1 " > * < p > < strong > CUDA array * type < / strong > * < / td > * < td valign = " top " rowspan = " 1 " colspan = " 1 " > * < p > < strong > Valid extents * that must always be met * { ( width range in * elements ) , ( height range ) , ( depth range ) } < / strong > * < / td > * < td valign = " top " rowspan = " 1 " colspan = " 1 " > * < p > < strong > Valid extents * with cudaArraySurfaceLoadStore set * { ( width range in * elements ) , ( height range ) , ( depth range ) } < / strong > * < / td > * < / tr > * < tr > * < td valign = " top " rowspan = " 1 " colspan = " 1 " > * < p > 1D < / p > * < / td > * < td valign = " top " rowspan = " 1 " colspan = " 1 " > * < p > { ( 1 , maxTexture1D ) , * 0 , 0 } * < / td > * < td valign = " top " rowspan = " 1 " colspan = " 1 " > * < p > { ( 1 , maxSurface1D ) , * 0 , 0 } * < / td > * < / tr > * < tr > * < td valign = " top " rowspan = " 1 " colspan = " 1 " > * < p > 2D < / p > * < / td > * < td valign = " top " rowspan = " 1 " colspan = " 1 " > * < p > { ( 1 , maxTexture2D [ 0 ] ) , * ( 1 , maxTexture2D [ 1 ] ) , 0 } * < / td > * < td valign = " top " rowspan = " 1 " colspan = " 1 " > * < p > { ( 1 , maxSurface2D [ 0 ] ) , * ( 1 , maxSurface2D [ 1 ] ) , 0 } * < / td > * < / tr > * < tr > * < td valign = " top " rowspan = " 1 " colspan = " 1 " > * < p > 3D < / p > * < / td > * < td valign = " top " rowspan = " 1 " colspan = " 1 " > * < p > { ( 1 , maxTexture3D [ 0 ] ) , * ( 1 , maxTexture3D [ 1 ] ) , ( 1 , maxTexture3D [ 2 ] ) } * < / td > * < td valign = " top " rowspan = " 1 " colspan = " 1 " > * < p > { ( 1 , maxSurface3D [ 0 ] ) , * ( 1 , maxSurface3D [ 1 ] ) , ( 1 , maxSurface3D [ 2 ] ) } * < / td > * < / tr > * < tr > * < td valign = " top " rowspan = " 1 " colspan = " 1 " > * < p > 1D Layered < / p > * < / td > * < td valign = " top " rowspan = " 1 " colspan = " 1 " > * ( 1 , maxTexture1DLayered [ 0 ] ) , 0 , ( 1 , maxTexture1DLayered [ 1 ] ) } * < / td > * < td valign = " top " rowspan = " 1 " colspan = " 1 " > * ( 1 , maxSurface1DLayered [ 0 ] ) , 0 , ( 1 , maxSurface1DLayered [ 1 ] ) } * < / td > * < / tr > * < tr > * < td valign = " top " rowspan = " 1 " colspan = " 1 " > * < p > 2D Layered < / p > * < / td > * < td valign = " top " rowspan = " 1 " colspan = " 1 " > * ( 1 , maxTexture2DLayered [ 0 ] ) , ( 1 , maxTexture2DLayered [ 1 ] ) , * ( 1 , maxTexture2DLayered [ 2 ] ) } * < / td > * < td valign = " top " rowspan = " 1 " colspan = " 1 " > * ( 1 , maxSurface2DLayered [ 0 ] ) , ( 1 , maxSurface2DLayered [ 1 ] ) , * ( 1 , maxSurface2DLayered [ 2 ] ) } * < / td > * < / tr > * < tr > * < td valign = " top " rowspan = " 1 " colspan = " 1 " > * < p > Cubemap < / p > * < / td > * < td valign = " top " rowspan = " 1 " colspan = " 1 " > * < p > { ( 1 , maxTextureCubemap ) , * ( 1 , maxTextureCubemap ) , 6 } * < / td > * < td valign = " top " rowspan = " 1 " colspan = " 1 " > * < p > { ( 1 , maxSurfaceCubemap ) , * ( 1 , maxSurfaceCubemap ) , 6 } * < / td > * < / tr > * < tr > * < td valign = " top " rowspan = " 1 " colspan = " 1 " > * < p > Cubemap Layered < / p > * < / td > * < td valign = " top " rowspan = " 1 " colspan = " 1 " > * ( 1 , maxTextureCubemapLayered [ 0 ] ) , ( 1 , maxTextureCubemapLayered [ 0 ] ) , * ( 1 , maxTextureCubemapLayered [ 1 ] ) } * < / td > * < td valign = " top " rowspan = " 1 " colspan = " 1 " > * ( 1 , maxSurfaceCubemapLayered [ 0 ] ) , ( 1 , maxSurfaceCubemapLayered [ 0 ] ) , * ( 1 , maxSurfaceCubemapLayered [ 1 ] ) } * < / td > * < / tr > * < / tbody > * < / table > * < / div > * < div > * < span > Note : < / span > * < p > Note that this * function may also return error codes from previous , asynchronous * launches . * < / div > * < / div > * @ param array Pointer to allocated array in device memory * @ param desc Requested channel format * @ param extent Requested allocation size ( width field in elements ) * @ param flags Flags for extensions * @ return cudaSuccess , cudaErrorMemoryAllocation * @ see JCuda # cudaMalloc3D * @ see JCuda # cudaMalloc * @ see JCuda # cudaMallocPitch * @ see JCuda # cudaFree * @ see JCuda # cudaFreeArray * @ see JCuda # cudaMallocHost * @ see JCuda # cudaFreeHost * @ see JCuda # cudaHostAlloc * @ see cudaExtent */ public static int cudaMalloc3DArray ( cudaArray arrayPtr , cudaChannelFormatDesc desc , cudaExtent extent ) { } }
return cudaMalloc3DArray ( arrayPtr , desc , extent , 0 ) ;
public class KAMStoreImpl { /** * { @ inheritDoc } */ @ Override public boolean collapseKamNode ( KamInfo info , KamNode collapsing , KamNode collapseTo ) { } }
try { KAMUpdateDao updateDao = kamUpdateDao ( info ) ; return updateDao . collapseKamNode ( collapsing , collapseTo ) ; } catch ( SQLException e ) { final String fmt = "error collapsing node for %s" ; final String msg = format ( fmt , info . getName ( ) ) ; throw new KAMStoreException ( msg , e ) ; }
public class ParserTokenStream { /** * Consumes all tokens until the token at the front of the stream is of one of the given types . * @ param types The types to cause the stream to stop consuming * @ return The list of tokens that were consumed . */ public List < ParserToken > consumeUntil ( ParserTokenType ... types ) { } }
List < ParserToken > tokens = new ArrayList < > ( ) ; while ( lookAheadType ( 0 ) != null && ! isOfType ( lookAheadType ( 0 ) , types ) ) { tokens . add ( consume ( ) ) ; } return tokens ;
public class ArrayBlock { /** * Create an array block directly without per element validations . */ static ArrayBlock createArrayBlockInternal ( int arrayOffset , int positionCount , @ Nullable boolean [ ] valueIsNull , int [ ] offsets , Block values ) { } }
validateConstructorArguments ( arrayOffset , positionCount , valueIsNull , offsets , values ) ; return new ArrayBlock ( arrayOffset , positionCount , valueIsNull , offsets , values ) ;
public class SqlApplicationConfigurationUpdate { /** * The array of < a > OutputUpdate < / a > objects describing the new destination streams used by the application . * @ param outputUpdates * The array of < a > OutputUpdate < / a > objects describing the new destination streams used by the application . */ public void setOutputUpdates ( java . util . Collection < OutputUpdate > outputUpdates ) { } }
if ( outputUpdates == null ) { this . outputUpdates = null ; return ; } this . outputUpdates = new java . util . ArrayList < OutputUpdate > ( outputUpdates ) ;
public class ConstructorCopier { /** * TODO may need to pay attention itf = = true */ @ Override public void visitMethodInsn ( final int opcode , final String owner , final String name , final String desc , boolean itf ) { } }
// If this is an invokespecial , first determine if it is the one of interest ( the one calling our super constructor ) if ( opcode == INVOKESPECIAL && name . charAt ( 0 ) == '<' ) { if ( unitializedObjectsCount != 0 ) { unitializedObjectsCount -- ; } else { // This looks like our INVOKESPECIAL if ( state == preInvokeSpecial ) { // special case for calling jlObject , do nothing ! if ( owner . equals ( "java/lang/Object" ) ) { mv . visitInsn ( POP ) ; } else { // Need to replace this INVOKESPECIAL call . String supertypename = typeDescriptor . getSupertypeName ( ) ; ReloadableType superRtype = typeDescriptor . getReloadableType ( ) . getTypeRegistry ( ) . getReloadableSuperType ( supertypename ) ; if ( superRtype == null ) { // supertype was not reloadable . This either means it really isn ' t ( doesn ' t match what we consider reloadable ) // or it just hasn ' t been loaded yet . // In a real scenario supertypes will get loaded first always and this can ' t happen ( the latter case ) - it happens in tests // because they don ' t actively load all their bits and pieces in a hierarchical way . Given that on a reloadable boundary // the magic ctors are setup to call a default ctor , we can assume that above the boundary the object has been initialized . // this means we don ' t need to call a super _ _ init _ _ or _ _ execute . . . /* if ( typeDescriptor . getReloadableType ( ) . getTypeRegistry ( ) . isReloadableTypeName ( supertypename ) ) { superRtype = typeDescriptor . getReloadableType ( ) . getTypeRegistry ( ) . getReloadableSuperType ( supertypename ) ; throw new IllegalStateException ( " The supertype " + supertypename . replace ( ' / ' , ' . ' ) + " has not been loaded as a reloadabletype " ) ; */ Utils . insertPopsForAllParameters ( mv , desc ) ; mv . visitInsn ( POP ) ; // pop ' this ' } else { // Check the original form of the supertype for a constructor to call MethodMember existingCtor = ( superRtype == null ? null : superRtype . getTypeDescriptor ( ) . getConstructor ( desc ) ) ; if ( existingCtor == null ) { // It did not exist in the original supertype version , need to use dynamic dispatch method // collapse the arguments on the stack Utils . collapseStackToArray ( mv , desc ) ; // now the stack is the instance then the params mv . visitInsn ( SWAP ) ; mv . visitInsn ( DUP_X1 ) ; // no stack is instance then params then instance mv . visitLdcInsn ( "<init>" + desc ) ; mv . visitMethodInsn ( INVOKESPECIAL , typeDescriptor . getSupertypeName ( ) , mDynamicDispatchName , mDynamicDispatchDescriptor , false ) ; mv . visitInsn ( POP ) ; } else { // it did exist in the original , so there will be parallel constructor mv . visitMethodInsn ( INVOKESPECIAL , typeDescriptor . getSupertypeName ( ) , mInitializerName , desc , false ) ; } } } state = postInvokeSpecial ; return ; } } } // Is it a private method call ? // TODO r $ check here because we use invokespecial to avoid virtual dispatch on field changes . . . if ( opcode == INVOKESPECIAL && name . charAt ( 0 ) != '<' && owner . equals ( classname ) && ! name . startsWith ( "r$" ) ) { // leaving the invokespecial alone will cause a verify error String descriptor = Utils . insertExtraParameter ( owner , desc ) ; super . visitMethodInsn ( INVOKESTATIC , Utils . getExecutorName ( classname , suffix ) , name , descriptor , false ) ; } else { boolean done = false ; // TODO dup of code in method copier - can we refactor ? if ( opcode == INVOKESTATIC ) { MethodMember mm = typeDescriptor . getByDescriptor ( name , desc ) ; if ( mm != null && mm . isPrivate ( ) ) { super . visitMethodInsn ( INVOKESTATIC , Utils . getExecutorName ( classname , suffix ) , name , desc , false ) ; done = true ; } } if ( ! done ) { super . visitMethodInsn ( opcode , owner , name , desc , itf ) ; } }
public class Scs_util { /** * Allocate a Scsd object ( a Sulmage - Mendelsohn decomposition ) . * @ param m * number of rows of the matrix A to be analyzed * @ param n * number of columns of the matrix A to be analyzed * @ return Sulmage - Mendelsohn decomposition */ public static Scsd cs_dalloc ( int m , int n ) { } }
Scsd S ; S = new Scsd ( ) ; S . p = new int [ m ] ; S . r = new int [ m + 6 ] ; S . q = new int [ n ] ; S . s = new int [ n + 6 ] ; S . cc = new int [ 5 ] ; S . rr = new int [ 5 ] ; return S ;
public class DefaultAgenda { /** * ( non - Javadoc ) * @ see org . kie . common . AgendaI # setFocus ( org . kie . spi . AgendaGroup ) */ @ Override public boolean setFocus ( final AgendaGroup agendaGroup ) { } }
// Set the focus to the agendaGroup if it doesn ' t already have the focus if ( this . focusStack . getLast ( ) != agendaGroup ) { ( ( InternalAgendaGroup ) this . focusStack . getLast ( ) ) . setActive ( false ) ; this . focusStack . add ( agendaGroup ) ; InternalAgendaGroup igroup = ( InternalAgendaGroup ) agendaGroup ; igroup . setActive ( true ) ; igroup . setActivatedForRecency ( this . workingMemory . getFactHandleFactory ( ) . getRecency ( ) ) ; final EventSupport eventsupport = this . workingMemory ; eventsupport . getAgendaEventSupport ( ) . fireAgendaGroupPushed ( agendaGroup , this . workingMemory ) ; return true ; } else { return false ; }
public class CxDxClientSessionImpl { /** * ( non - Javadoc ) * @ see org . jdiameter . api . cxdx . ClientCxDxSession # sendLocationInformationRequest ( org . jdiameter . api . cxdx . events . JLocationInfoRequest ) */ @ Override public void sendLocationInformationRequest ( JLocationInfoRequest request ) throws InternalException , IllegalDiameterStateException , RouteException , OverloadException { } }
send ( Event . Type . SEND_MESSAGE , request , null ) ;
public class nitro_service { /** * Use this API to login into Netscaler . * @ param username Username * @ param password Password for the Netscaler . * @ param timeout timeout for netscaler session . Default is 1800secs * @ return status of the operation performed . * @ throws Exception nitro exception is thrown . */ public base_response login ( String username , String password , Long timeout ) throws Exception { } }
this . set_credential ( username , password ) ; this . set_timeout ( timeout ) ; return this . login ( ) ;
public class Debugger { public static void printFatal ( Object caller , String format , Object ... args ) { } }
Object msg = String . format ( format , args ) ; printFatal ( caller , msg ) ;
public class PravegaRequestProcessor { /** * Copy all of the contents provided into a byteBuffer and return it . */ @ SneakyThrows ( IOException . class ) private ByteBuffer copyData ( List < ReadResultEntryContents > contents ) { } }
int totalSize = contents . stream ( ) . mapToInt ( ReadResultEntryContents :: getLength ) . sum ( ) ; ByteBuffer data = ByteBuffer . allocate ( totalSize ) ; int bytesCopied = 0 ; for ( ReadResultEntryContents content : contents ) { int copied = StreamHelpers . readAll ( content . getData ( ) , data . array ( ) , bytesCopied , totalSize - bytesCopied ) ; Preconditions . checkState ( copied == content . getLength ( ) , "Read fewer bytes than available." ) ; bytesCopied += copied ; } return data ;
public class ElevationShadowView { /** * Obtains the view ' s attributes from a specific attribute set . * @ param attributeSet * The attribute set , the view ' s attributes should be obtained from , as an instance of * the type { @ link AttributeSet } or null , if no attributes should be obtained * @ param defaultStyle * The default style to apply to this view . If 0 , no style will be applied ( beyond what * is included in the theme ) . This may either be an attribute resource , whose value will * be retrieved from the current theme , or an explicit style resource * @ param defaultStyleResource * A resource identifier of a style resource that supplies default values for the view , * used only if the default style is 0 or can not be found in the theme . Can be 0 to not * look for defaults */ private void obtainStyledAttributes ( @ Nullable final AttributeSet attributeSet , @ AttrRes final int defaultStyle , @ StyleRes final int defaultStyleResource ) { } }
TypedArray typedArray = getContext ( ) . obtainStyledAttributes ( attributeSet , R . styleable . ElevationShadowView , defaultStyle , defaultStyleResource ) ; try { obtainShadowElevation ( typedArray ) ; obtainShadowOrientation ( typedArray ) ; obtainEmulateParallelLight ( typedArray ) ; } finally { typedArray . recycle ( ) ; }
public class ClientAdminBootstrap { /** * Explicitly override autoapprove in all clients that were provided in the * whitelist . */ private void updateAutoApproveClients ( ) { } }
autoApproveClients . removeAll ( clientsToDelete ) ; for ( String clientId : autoApproveClients ) { try { BaseClientDetails base = ( BaseClientDetails ) clientRegistrationService . loadClientByClientId ( clientId , IdentityZone . getUaaZoneId ( ) ) ; base . addAdditionalInformation ( ClientConstants . AUTO_APPROVE , true ) ; logger . debug ( "Adding autoapprove flag to client: " + clientId ) ; clientRegistrationService . updateClientDetails ( base , IdentityZone . getUaaZoneId ( ) ) ; } catch ( NoSuchClientException n ) { logger . debug ( "Client not found, unable to set autoapprove: " + clientId ) ; } }
public class IdentityProviderType { /** * The identity provider details , such as < code > MetadataURL < / code > and < code > MetadataFile < / code > . * @ param providerDetails * The identity provider details , such as < code > MetadataURL < / code > and < code > MetadataFile < / code > . * @ return Returns a reference to this object so that method calls can be chained together . */ public IdentityProviderType withProviderDetails ( java . util . Map < String , String > providerDetails ) { } }
setProviderDetails ( providerDetails ) ; return this ;
public class ZConfig { /** * Saves the configuration to a file . * < strong > This method will overwrite contents of existing file < / strong > * @ param filename the path of the file to save the configuration into , or " - " to dump it to standard output * @ return the saved file or null if dumped to the standard output * @ throws IOException if unable to save the file . */ public File save ( String filename ) throws IOException { } }
if ( "-" . equals ( filename ) ) { // print to console try ( Writer writer = new PrintWriter ( System . out ) ) { save ( writer ) ; } return null ; } else { // write to file final File file = new File ( filename ) ; if ( file . exists ( ) ) { file . delete ( ) ; } else { // create necessary directories ; file . getParentFile ( ) . mkdirs ( ) ; } Writer writer = new FileWriter ( file ) ; try { save ( writer ) ; } finally { writer . close ( ) ; } return file ; }
public class FctConvertersToFromString { /** * < p > Get CnvTfsDateTime ( create and put into map ) . < / p > * @ return requested CnvTfsDateTime * @ throws Exception - an exception */ protected final CnvTfsDateTime createPutCnvTfsDateTime ( ) throws Exception { } }
CnvTfsDateTime convrt = new CnvTfsDateTime ( ) ; this . convertersMap . put ( CnvTfsDateTime . class . getSimpleName ( ) , convrt ) ; return convrt ;
public class DateCaster { /** * converts a String to a DateTime Object , returns null if invalid string * @ param str String to convert * @ param convertingType one of the following values : - CONVERTING _ TYPE _ NONE : number are not * converted at all - CONVERTING _ TYPE _ YEAR : integers are handled as years - * CONVERTING _ TYPE _ OFFSET : numbers are handled as offset from 1899-12-30 00:00:00 UTC * @ param alsoMonthString allow that the month is a english name * @ param timeZone * @ param defaultValue * @ return Date Time Object */ private static DateTime parseDateTime ( String str , DateString ds , short convertingType , boolean alsoMonthString , TimeZone timeZone , DateTime defaultValue ) { } }
int month = 0 ; int first = ds . readDigits ( ) ; // first if ( first == - 1 ) { if ( ! alsoMonthString ) return defaultValue ; first = ds . readMonthString ( ) ; if ( first == - 1 ) return defaultValue ; month = 1 ; } if ( ds . isAfterLast ( ) ) return month == 1 ? defaultValue : numberToDate ( timeZone , Caster . toDoubleValue ( str , Double . NaN ) , convertingType , defaultValue ) ; char del = ds . current ( ) ; if ( del != '.' && del != '/' && del != '-' && del != ' ' && del != '\t' ) { if ( ds . fwIfCurrent ( ':' ) ) { return parseTime ( timeZone , new int [ ] { 1899 , 12 , 30 } , ds , defaultValue , first ) ; } return defaultValue ; } ds . next ( ) ; ds . removeWhitespace ( ) ; // second int second = ds . readDigits ( ) ; if ( second == - 1 ) { if ( ! alsoMonthString || month != 0 ) return defaultValue ; second = ds . readMonthString ( ) ; if ( second == - 1 ) return defaultValue ; month = 2 ; } if ( ds . isAfterLast ( ) ) { return toDate ( month , timeZone , first , second , defaultValue ) ; } char del2 = ds . current ( ) ; if ( del != del2 ) { ds . fwIfCurrent ( ' ' ) ; ds . fwIfCurrent ( 'T' ) ; ds . fwIfCurrent ( ' ' ) ; return parseTime ( timeZone , _toDate ( timeZone , month , first , second ) , ds , defaultValue , - 1 ) ; } ds . next ( ) ; ds . removeWhitespace ( ) ; int third = ds . readDigits ( ) ; if ( third == - 1 ) { return defaultValue ; } if ( ds . isAfterLast ( ) ) { if ( classicStyle ( ) && del == '.' ) return toDate ( month , timeZone , second , first , third , defaultValue ) ; return toDate ( month , timeZone , first , second , third , defaultValue ) ; } ds . fwIfCurrent ( ' ' ) ; ds . fwIfCurrent ( 'T' ) ; ds . fwIfCurrent ( ' ' ) ; if ( classicStyle ( ) && del == '.' ) return parseTime ( timeZone , _toDate ( month , second , first , third ) , ds , defaultValue , - 1 ) ; return parseTime ( timeZone , _toDate ( month , first , second , third ) , ds , defaultValue , - 1 ) ;
public class WordReplacementIterator { /** * Advances to the next word in the token stream . */ public void advance ( ) { } }
String s = null ; if ( baseIterator . hasNext ( ) ) next = baseIterator . next ( ) ; else next = null ;
public class SDDLHelper { /** * Check if user canot change password . * @ param sddl SSDL . * @ return < tt > true < / tt > if user cannot change password : < tt > false < / tt > otherwise . */ public static boolean isUserCannotChangePassword ( final SDDL sddl ) { } }
boolean res = false ; final List < ACE > aces = sddl . getDacl ( ) . getAces ( ) ; for ( int i = 0 ; ! res && i < aces . size ( ) ; i ++ ) { final ACE ace = aces . get ( i ) ; if ( ace . getType ( ) == AceType . ACCESS_DENIED_OBJECT_ACE_TYPE && ace . getObjectFlags ( ) . getFlags ( ) . contains ( AceObjectFlags . Flag . ACE_OBJECT_TYPE_PRESENT ) ) { if ( GUID . getGuidAsString ( ace . getObjectType ( ) ) . equals ( UCP_OBJECT_GUID ) ) { final SID sid = ace . getSid ( ) ; if ( sid . getSubAuthorities ( ) . size ( ) == 1 ) { if ( ( Arrays . equals ( sid . getIdentifierAuthority ( ) , new byte [ ] { 0x00 , 0x00 , 0x00 , 0x00 , 0x00 , 0x01 } ) && Arrays . equals ( sid . getSubAuthorities ( ) . get ( 0 ) , new byte [ ] { 0x00 , 0x00 , 0x00 , 0x00 } ) ) || ( Arrays . equals ( sid . getIdentifierAuthority ( ) , new byte [ ] { 0x00 , 0x00 , 0x00 , 0x00 , 0x00 , 0x05 } ) && Arrays . equals ( sid . getSubAuthorities ( ) . get ( 0 ) , new byte [ ] { 0x00 , 0x00 , 0x00 , 0x0a } ) ) ) { res = true ; } } } } } return res ;
public class RdKNNTree { /** * Throws an IllegalArgumentException if the specified distance function is * not an instance of the distance function used by this index . * @ throws IllegalArgumentException * @ param distanceFunction the distance function to be checked */ private void checkDistanceFunction ( SpatialPrimitiveDistanceFunction < ? super O > distanceFunction ) { } }
if ( ! settings . distanceFunction . equals ( distanceFunction ) ) { throw new IllegalArgumentException ( "Parameter distanceFunction must be an instance of " + this . distanceQuery . getClass ( ) + ", but is " + distanceFunction . getClass ( ) ) ; }
public class AWSIotClient { /** * Lists the job executions for a job . * @ param listJobExecutionsForJobRequest * @ return Result of the ListJobExecutionsForJob operation returned by the service . * @ throws InvalidRequestException * The request is not valid . * @ throws ResourceNotFoundException * The specified resource does not exist . * @ throws ThrottlingException * The rate exceeds the limit . * @ throws ServiceUnavailableException * The service is temporarily unavailable . * @ sample AWSIot . ListJobExecutionsForJob */ @ Override public ListJobExecutionsForJobResult listJobExecutionsForJob ( ListJobExecutionsForJobRequest request ) { } }
request = beforeClientExecution ( request ) ; return executeListJobExecutionsForJob ( request ) ;
public class ObservableListenerHelper { /** * { @ inheritDoc } */ @ Override public void addListener ( ChangeListener < ? super T > listener ) { } }
Objects . requireNonNull ( listener ) ; if ( size == 0 ) { sentinel = false ; this . listener = listener ; this . value = getValue ( ) ; } else if ( size == 1 ) { sentinel = false ; this . listener = new Object [ ] { this . listener , listener } ; } else { Object [ ] l = ( Object [ ] ) this . listener ; if ( l . length <= size + 1 ) { // test for sentinel not required as we put the new listener behind this . size , thus it won ' t be fired sentinel = false ; l = Arrays . copyOf ( l , l . length * 3 / 2 + 1 ) ; this . listener = l ; } l [ size ] = listener ; } if ( invalidationSize == size ) { this . value = getValue ( ) ; } size ++ ;
public class VirtualMachineExtensionImagesInner { /** * Gets a list of virtual machine extension image types . * @ param location The name of a supported Azure region . * @ param publisherName the String value * @ param serviceCallback the async ServiceCallback to handle successful and failed responses . * @ throws IllegalArgumentException thrown if parameters fail the validation * @ return the { @ link ServiceFuture } object */ public ServiceFuture < List < VirtualMachineExtensionImageInner > > listTypesAsync ( String location , String publisherName , final ServiceCallback < List < VirtualMachineExtensionImageInner > > serviceCallback ) { } }
return ServiceFuture . fromResponse ( listTypesWithServiceResponseAsync ( location , publisherName ) , serviceCallback ) ;
public class ExpressionUtils { /** * Create a new Template expression * @ param cl type of expression * @ param template template * @ param args template parameters * @ return template expression */ public static < T > TemplateExpression < T > template ( Class < ? extends T > cl , String template , Object ... args ) { } }
return template ( cl , TemplateFactory . DEFAULT . create ( template ) , ImmutableList . copyOf ( args ) ) ;
public class JCompositeRowsSelectorPanel { /** * < / editor - fold > / / GEN - END : initComponents */ public void addItemsToComposite ( String testplan , String row ) { } }
String [ ] path = new String [ 3 ] ; path [ 0 ] = tree2RootName ; path [ 1 ] = testplan ; path [ 2 ] = row ; TreePath [ ] tp = new TreePath [ 1 ] ; tp [ 0 ] = new TreePath ( path ) ; addItemsToComposite ( tp ) ;
public class TopologyStarter { /** * Applies type conversion where needed . * @ return a copy of source ready for Storm . * @ see < a href = " https : / / issues . apache . org / jira / browse / STORM - 173 " > Strom issue 173 < / a > */ public static Config stormConfig ( Properties source ) { } }
Config result = new Config ( ) ; logger . debug ( "Mapping declared types for Storm properties..." ) ; for ( Field field : result . getClass ( ) . getDeclaredFields ( ) ) { if ( field . getType ( ) != String . class ) continue ; if ( field . getName ( ) . endsWith ( CONFIGURATION_TYPE_FIELD_SUFFIX ) ) continue ; try { String key = field . get ( result ) . toString ( ) ; String entry = source . getProperty ( key ) ; if ( entry == null ) continue ; String typeFieldName = field . getName ( ) + CONFIGURATION_TYPE_FIELD_SUFFIX ; Field typeField = result . getClass ( ) . getDeclaredField ( typeFieldName ) ; Object type = typeField . get ( result ) ; logger . trace ( "Detected key '{}' as: {}" , key , field ) ; Object value = null ; if ( type == String . class ) value = entry ; if ( type == ConfigValidation . IntegerValidator . class || type == ConfigValidation . PowerOf2Validator . class ) value = Integer . valueOf ( entry ) ; if ( type == Boolean . class ) value = Boolean . valueOf ( entry ) ; if ( type == ConfigValidation . StringOrStringListValidator . class ) value = asList ( entry . split ( LIST_CONTINUATION_PATTERN ) ) ; if ( value == null ) { logger . warn ( "No parser for key '{}' type: {}" , key , typeField ) ; value = entry ; } result . put ( key , value ) ; } catch ( ReflectiveOperationException e ) { logger . debug ( "Interpretation failure on {}: {}" , field , e ) ; } } // Copy remaining for ( Map . Entry < Object , Object > e : source . entrySet ( ) ) { String key = e . getKey ( ) . toString ( ) ; if ( result . containsKey ( key ) ) continue ; result . put ( key , e . getValue ( ) ) ; } return result ;
public class Client { /** * Initializes the { @ link SessionState } from a previous snapshot with specific state information . * If a system needs to be built that withstands outages and needs to resume where left off , this method , * combined with the periodic persistence of the { @ link SessionState } provides resume capabilities . If you * need to start fresh , take a look at { @ link # initializeState ( StreamFrom , StreamTo ) } as well as * { @ link # recoverOrInitializeState ( StateFormat , byte [ ] , StreamFrom , StreamTo ) } . * @ param format the format used when persisting . * @ param persistedState the opaque byte array representing the persisted state . * @ return A { @ link Completable } indicating the success or failure of the state recovery . */ public Completable recoverState ( final StateFormat format , final byte [ ] persistedState ) { } }
return Completable . create ( new Completable . OnSubscribe ( ) { @ Override public void call ( CompletableSubscriber subscriber ) { LOGGER . info ( "Recovering state from format {}" , format ) ; LOGGER . debug ( "PersistedState on recovery is: {}" , new String ( persistedState , CharsetUtil . UTF_8 ) ) ; try { if ( format == StateFormat . JSON ) { sessionState ( ) . setFromJson ( persistedState ) ; subscriber . onCompleted ( ) ; } else { subscriber . onError ( new IllegalStateException ( "Unsupported StateFormat " + format ) ) ; } } catch ( Exception ex ) { subscriber . onError ( ex ) ; } } } ) ;
public class QueryExecutor { /** * Execute the query passed using the selection of index definition provided . * The index definitions are presumed to already exist and be up to date for the * { @ link Database } and its underlying { @ link SQLDatabaseQueue } passed to the constructor . * @ param query query to execute . * @ param indexes indexes to use ( this method will select the most appropriate ) . * @ param skip how many results to skip before returning results to caller * @ param limit number of documents the result should be limited to * @ param fields fields to project from the result documents * @ param sortDocument document specifying the order to return results , null to have no sorting * @ return the query result */ public QueryResult find ( Map < String , Object > query , final List < Index > indexes , long skip , long limit , List < String > fields , final List < FieldSort > sortDocument ) throws QueryException { } }
// Validate inputs fields = normaliseFields ( fields ) ; // will throw IllegalArgumentException if there are invalid fields validateFields ( fields ) ; // normalise and validate query by passing into the executors query = QueryValidator . normaliseAndValidateQuery ( query ) ; // Execute the query Boolean [ ] indexesCoverQuery = new Boolean [ ] { false } ; final ChildrenQueryNode root = translateQuery ( query , indexes , indexesCoverQuery ) ; Future < List < String > > result = queue . submit ( new SQLCallable < List < String > > ( ) { @ Override public List < String > call ( SQLDatabase database ) throws Exception { Set < String > docIdSet = executeQueryTree ( root , database ) ; List < String > docIdList ; // sorting if ( sortDocument != null && ! sortDocument . isEmpty ( ) ) { docIdList = sortIds ( docIdSet , sortDocument , indexes , database ) ; } else { docIdList = docIdSet != null ? new ArrayList < String > ( docIdSet ) : null ; } return docIdList ; } } ) ; List < String > docIds ; try { docIds = result . get ( ) ; } catch ( ExecutionException e ) { String message = "Execution error encountered" ; logger . log ( Level . SEVERE , message , e ) ; throw new QueryException ( message , e . getCause ( ) ) ; } catch ( InterruptedException e ) { String message = "Execution interrupted error encountered" ; logger . log ( Level . SEVERE , message , e ) ; throw new QueryException ( message , e . getCause ( ) ) ; } if ( docIds == null ) { return null ; } UnindexedMatcher matcher = matcherForIndexCoverage ( indexesCoverQuery , query ) ; if ( matcher != null ) { String msg = "query could not be executed using indexes alone; falling back to " ; msg += "filtering documents themselves. This will be VERY SLOW as each candidate " ; msg += "document is loaded from the datastore and matched against the query selector." ; logger . log ( Level . WARNING , msg ) ; } return new QueryResult ( docIds , database , fields , skip , limit , matcher ) ;
public class HttpUtil { /** * Returns { @ code true } if the specified message contains an expect header specifying an expectation that is not * supported . Note that this method returns { @ code false } if the expect header is not valid for the message * ( e . g . , the message is a response , or the version on the message is HTTP / 1.0 ) . * @ param message the message * @ return { @ code true } if and only if an expectation is present that is not supported */ static boolean isUnsupportedExpectation ( HttpMessage message ) { } }
if ( ! isExpectHeaderValid ( message ) ) { return false ; } final String expectValue = message . headers ( ) . get ( HttpHeaderNames . EXPECT ) ; return expectValue != null && ! HttpHeaderValues . CONTINUE . toString ( ) . equalsIgnoreCase ( expectValue ) ;
public class QueryExecution { /** * Send request for more data for this query . * NOTE : This method is always run in a background thread ! ! * @ param startRow * Start row needed in return batch */ private void asyncMoreRequest ( int startRow ) { } }
try { DataRequest moreRequest = new DataRequest ( ) ; moreRequest . queryId = queryId ; moreRequest . startRow = startRow ; moreRequest . maxSize = maxBatchSize ; logger . debug ( "Client requesting {} .. {}" , startRow , ( startRow + maxBatchSize - 1 ) ) ; DataResponse response = server . data ( moreRequest ) ; logger . debug ( "Client got response {} .. {}, more={}" , new Object [ ] { response . startRow , ( response . startRow + response . data . size ( ) - 1 ) , response . more } ) ; nextData . add ( new Window ( response . data , response . more ) ) ; } catch ( AvroRemoteException e ) { this . nextData . addError ( toSparqlException ( e ) ) ; } catch ( Throwable t ) { this . nextData . addError ( t ) ; }
public class SelectExtension { /** * selects an item and remembers its position in the selections list * @ param position the global position * @ param fireEvent true if the onClick listener should be called * @ param considerSelectableFlag true if the select method should not select an item if its not selectable */ public void select ( int position , boolean fireEvent , boolean considerSelectableFlag ) { } }
FastAdapter . RelativeInfo < Item > relativeInfo = mFastAdapter . getRelativeInfo ( position ) ; if ( relativeInfo == null || relativeInfo . item == null ) { return ; } select ( relativeInfo . adapter , relativeInfo . item , position , fireEvent , considerSelectableFlag ) ;
public class AsyncFacebookRunner { /** * Invalidate the current user session by removing the access token in * memory , clearing the browser cookies , and calling auth . expireSession * through the API . The application will be notified when logout is * complete via the callback interface . * Note that this method is asynchronous and the callback will be invoked * in a background thread ; operations that affect the UI will need to be * posted to the UI thread or an appropriate handler . * This method is deprecated . See { @ link Facebook } and { @ link com . facebook . Session } for more info . * @ param context * The Android context in which the logout should be called : it * should be the same context in which the login occurred in * order to clear any stored cookies * @ param listener * Callback interface to notify the application when the request * has completed . * @ param state * An arbitrary object used to identify the request when it * returns to the callback . This has no effect on the request * itself . */ @ Deprecated public void logout ( final Context context , final RequestListener listener , final Object state ) { } }
new Thread ( ) { @ Override public void run ( ) { try { String response = fb . logoutImpl ( context ) ; if ( response . length ( ) == 0 || response . equals ( "false" ) ) { listener . onFacebookError ( new FacebookError ( "auth.expireSession failed" ) , state ) ; return ; } listener . onComplete ( response , state ) ; } catch ( FileNotFoundException e ) { listener . onFileNotFoundException ( e , state ) ; } catch ( MalformedURLException e ) { listener . onMalformedURLException ( e , state ) ; } catch ( IOException e ) { listener . onIOException ( e , state ) ; } } } . start ( ) ;
public class AptUtil { /** * Returns the name of corresponding getter . * @ param element the field * @ return getter name * @ author vvakame */ public static String getElementGetter ( Element element ) { } }
// TODO 型 ( boolean ) による絞り込みをするべき String getterName1 = "get" + element . getSimpleName ( ) . toString ( ) ; String getterName2 = "is" + element . getSimpleName ( ) . toString ( ) ; String getterName3 = element . getSimpleName ( ) . toString ( ) ; Element getter = null ; for ( Element method : ElementFilter . methodsIn ( element . getEnclosingElement ( ) . getEnclosedElements ( ) ) ) { String methodName = method . getSimpleName ( ) . toString ( ) ; if ( getterName1 . equalsIgnoreCase ( methodName ) ) { if ( isStatic ( method ) == false && isPublic ( method ) || isPackagePrivate ( method ) ) { getter = method ; break ; } } else if ( getterName2 . equalsIgnoreCase ( methodName ) ) { if ( isStatic ( method ) == false && isPublic ( method ) || isPackagePrivate ( method ) ) { getter = method ; break ; } } else if ( getterName3 . equalsIgnoreCase ( methodName ) ) { if ( isStatic ( method ) == false && isPublic ( method ) || isPackagePrivate ( method ) ) { getter = method ; break ; } } } if ( getter != null ) { return getter . getSimpleName ( ) . toString ( ) ; } else { return null ; }
public class SamplingRuleRecordMarshaller { /** * Marshall the given parameter object . */ public void marshall ( SamplingRuleRecord samplingRuleRecord , ProtocolMarshaller protocolMarshaller ) { } }
if ( samplingRuleRecord == null ) { throw new SdkClientException ( "Invalid argument passed to marshall(...)" ) ; } try { protocolMarshaller . marshall ( samplingRuleRecord . getSamplingRule ( ) , SAMPLINGRULE_BINDING ) ; protocolMarshaller . marshall ( samplingRuleRecord . getCreatedAt ( ) , CREATEDAT_BINDING ) ; protocolMarshaller . marshall ( samplingRuleRecord . getModifiedAt ( ) , MODIFIEDAT_BINDING ) ; } catch ( Exception e ) { throw new SdkClientException ( "Unable to marshall request to JSON: " + e . getMessage ( ) , e ) ; }
public class AmazonECSClient { /** * Modifies the status of an Amazon ECS container instance . * You can change the status of a container instance to < code > DRAINING < / code > to manually remove an instance from a * cluster , for example to perform system updates , update the Docker daemon , or scale down the cluster size . * When you set a container instance to < code > DRAINING < / code > , Amazon ECS prevents new tasks from being scheduled * for placement on the container instance and replacement service tasks are started on other container instances in * the cluster if the resources are available . Service tasks on the container instance that are in the * < code > PENDING < / code > state are stopped immediately . * Service tasks on the container instance that are in the < code > RUNNING < / code > state are stopped and replaced * according to the service ' s deployment configuration parameters , < code > minimumHealthyPercent < / code > and * < code > maximumPercent < / code > . You can change the deployment configuration of your service using * < a > UpdateService < / a > . * < ul > * < li > * If < code > minimumHealthyPercent < / code > is below 100 % , the scheduler can ignore < code > desiredCount < / code > * temporarily during task replacement . For example , < code > desiredCount < / code > is four tasks , a minimum of 50% * allows the scheduler to stop two existing tasks before starting two new tasks . If the minimum is 100 % , the * service scheduler can ' t remove existing tasks until the replacement tasks are considered healthy . Tasks for * services that do not use a load balancer are considered healthy if they are in the < code > RUNNING < / code > state . * Tasks for services that use a load balancer are considered healthy if they are in the < code > RUNNING < / code > state * and the container instance they are hosted on is reported as healthy by the load balancer . * < / li > * < li > * The < code > maximumPercent < / code > parameter represents an upper limit on the number of running tasks during task * replacement , which enables you to define the replacement batch size . For example , if < code > desiredCount < / code > is * four tasks , a maximum of 200 % starts four new tasks before stopping the four tasks to be drained , provided that * the cluster resources required to do this are available . If the maximum is 100 % , then replacement tasks can ' t * start until the draining tasks have stopped . * < / li > * < / ul > * Any < code > PENDING < / code > or < code > RUNNING < / code > tasks that do not belong to a service are not affected . You must * wait for them to finish or stop them manually . * A container instance has completed draining when it has no more < code > RUNNING < / code > tasks . You can verify this * using < a > ListTasks < / a > . * When you set a container instance to < code > ACTIVE < / code > , the Amazon ECS scheduler can begin scheduling tasks on * the instance again . * @ param updateContainerInstancesStateRequest * @ return Result of the UpdateContainerInstancesState operation returned by the service . * @ throws ServerException * These errors are usually caused by a server issue . * @ throws ClientException * These errors are usually caused by a client action , such as using an action or resource on behalf of a * user that doesn ' t have permissions to use the action or resource , or specifying an identifier that is not * valid . * @ throws InvalidParameterException * The specified parameter is invalid . Review the available parameters for the API request . * @ throws ClusterNotFoundException * The specified cluster could not be found . You can view your available clusters with < a > ListClusters < / a > . * Amazon ECS clusters are Region - specific . * @ sample AmazonECS . UpdateContainerInstancesState * @ see < a href = " http : / / docs . aws . amazon . com / goto / WebAPI / ecs - 2014-11-13 / UpdateContainerInstancesState " * target = " _ top " > AWS API Documentation < / a > */ @ Override public UpdateContainerInstancesStateResult updateContainerInstancesState ( UpdateContainerInstancesStateRequest request ) { } }
request = beforeClientExecution ( request ) ; return executeUpdateContainerInstancesState ( request ) ;
public class OpenOAuth2s { /** * 获取access token * @ param code * @ return */ public AccessToken getAccessToken ( String code ) { } }
String url = WxEndpoint . get ( "url.oauth.accesstoken.get" ) ; String formatUrl = String . format ( url , wxClient . getClientId ( ) , wxClient . getClientSecret ( ) , code ) ; logger . debug ( "get access token: {}" , formatUrl ) ; String response = wxClient . get ( formatUrl , false ) ; return JsonMapper . defaultMapper ( ) . fromJson ( response , AccessToken . class ) ;
public class LiKafkaSchemaRegistry { /** * Register a schema to the Kafka schema registry under the provided input name . This method will change the name * of the schema to the provided name if configured to do so . This is useful because certain services ( like Gobblin kafka adaptor and * Camus ) get the schema for a topic by querying for the latest schema with the topic name , requiring the topic * name and schema name to match for all topics . If it is not configured to switch names , this is useful for the case * where the Kafka topic and Avro schema names do not match . This method registers the schema to the schema registry in such a * way that any schema can be written to any topic . * @ param schema { @ link org . apache . avro . Schema } to register . * @ param name Name of the schema when registerd to the schema registry . This name should match the name * of the topic where instances will be published . * @ return schema ID of the registered schema . * @ throws SchemaRegistryException if registration failed */ @ Override public MD5Digest register ( String name , Schema schema ) throws SchemaRegistryException { } }
PostMethod post = new PostMethod ( url ) ; if ( this . switchTopicNames ) { return register ( AvroUtils . switchName ( schema , name ) , post ) ; } else { post . addParameter ( "name" , name ) ; return register ( schema , post ) ; }
public class DescribeDirectConnectGatewaysResult { /** * The Direct Connect gateways . * @ return The Direct Connect gateways . */ public java . util . List < DirectConnectGateway > getDirectConnectGateways ( ) { } }
if ( directConnectGateways == null ) { directConnectGateways = new com . amazonaws . internal . SdkInternalList < DirectConnectGateway > ( ) ; } return directConnectGateways ;
public class CmsListDateMacroFormatter { /** * Returns a default date formatter object . < p > * @ return a default date formatter object */ public static I_CmsListFormatter getDefaultDateFormatter ( ) { } }
return new CmsListDateMacroFormatter ( Messages . get ( ) . container ( Messages . GUI_LIST_DATE_FORMAT_1 ) , Messages . get ( ) . container ( Messages . GUI_LIST_DATE_FORMAT_NEVER_0 ) ) ;
public class Channel { /** * Join peer to channel * @ param orderer The orderer to get the genesis block . * @ param peer the peer to join the channel . * @ param peerOptions see { @ link PeerOptions } * @ return * @ throws ProposalException */ public Channel joinPeer ( Orderer orderer , Peer peer , PeerOptions peerOptions ) throws ProposalException { } }
logger . debug ( format ( "Channel %s joining peer %s, url: %s" , name , peer . getName ( ) , peer . getUrl ( ) ) ) ; if ( shutdown ) { throw new ProposalException ( format ( "Channel %s has been shutdown." , name ) ) ; } Channel peerChannel = peer . getChannel ( ) ; if ( null != peerChannel && peerChannel != this ) { throw new ProposalException ( format ( "Can not add peer %s to channel %s because it already belongs to channel %s." , peer . getName ( ) , name , peerChannel . getName ( ) ) ) ; } logger . info ( format ( "%s joining %s." , toString ( ) , peer ) ) ; if ( genesisBlock == null && orderers . isEmpty ( ) ) { ProposalException e = new ProposalException ( "Channel missing genesis block and no orderers configured" ) ; logger . error ( e . getMessage ( ) , e ) ; } try { genesisBlock = getGenesisBlock ( orderer ) ; logger . debug ( format ( "Channel %s got genesis block" , name ) ) ; final Channel systemChannel = newSystemChannel ( client ) ; // channel is not really created and this is targeted to system channel TransactionContext transactionContext = systemChannel . getTransactionContext ( ) ; FabricProposal . Proposal joinProposal = JoinPeerProposalBuilder . newBuilder ( ) . context ( transactionContext ) . genesisBlock ( genesisBlock ) . build ( ) ; logger . debug ( "Getting signed proposal." ) ; SignedProposal signedProposal = getSignedProposal ( transactionContext , joinProposal ) ; logger . debug ( "Got signed proposal." ) ; addPeer ( peer , peerOptions ) ; // need to add peer . Collection < ProposalResponse > resp = sendProposalToPeers ( new ArrayList < > ( Collections . singletonList ( peer ) ) , signedProposal , transactionContext ) ; ProposalResponse pro = resp . iterator ( ) . next ( ) ; if ( pro . getStatus ( ) == ProposalResponse . Status . SUCCESS ) { logger . info ( format ( "Peer %s joined into channel %s" , peer , toString ( ) ) ) ; } else { removePeerInternal ( peer ) ; throw new ProposalException ( format ( "Join peer to channel %s failed. Status %s, details: %s" , name , pro . getStatus ( ) . toString ( ) , pro . getMessage ( ) ) ) ; } } catch ( ProposalException e ) { logger . error ( format ( "%s removing peer %s due to exception %s" , toString ( ) , peer , e . getMessage ( ) ) ) ; removePeerInternal ( peer ) ; logger . error ( e ) ; throw e ; } catch ( Exception e ) { logger . error ( format ( "%s removing peer %s due to exception %s" , toString ( ) , peer , e . getMessage ( ) ) ) ; peers . remove ( peer ) ; logger . error ( e ) ; throw new ProposalException ( e . getMessage ( ) , e ) ; } return this ;
public class World { /** * Sets the { @ code defaultParent } { @ code Actor } as the default for this { @ code World } . ( INTERNAL ONLY ) * @ param defaultParent the { @ code Actor } to use as the default parent */ synchronized void setDefaultParent ( final Actor defaultParent ) { } }
if ( defaultParent != null && this . defaultParent != null ) { throw new IllegalStateException ( "Default parent already exists." ) ; } this . defaultParent = defaultParent ;
public class DartSuperAccessorsPass { /** * Returns true if this node is or is enclosed by an instance member definition * ( non - static method , getter or setter ) . */ private static boolean isInsideInstanceMember ( Node n ) { } }
while ( n != null ) { if ( n . isMemberFunctionDef ( ) || n . isGetterDef ( ) || n . isSetterDef ( ) || n . isComputedProp ( ) ) { return ! n . isStaticMember ( ) ; } if ( n . isClass ( ) ) { // Stop at the first enclosing class . return false ; } n = n . getParent ( ) ; } return false ;
public class CredentialFactory { /** * Returns shared staticHttpTransport instance ; initializes staticHttpTransport if it hasn ' t * already been initialized . */ private static synchronized HttpTransport getStaticHttpTransport ( ) throws IOException , GeneralSecurityException { } }
if ( staticHttpTransport == null ) { staticHttpTransport = HttpTransportFactory . createHttpTransport ( HttpTransportType . JAVA_NET ) ; } return staticHttpTransport ;