signature
stringlengths
43
39.1k
implementation
stringlengths
0
450k
public class JsonRpcHttpAsyncClient { /** * Set the request headers . * @ param request the request object * @ param headers to be used */ private void addHeaders ( HttpRequest request , Map < String , String > headers ) { } }
for ( Map . Entry < String , String > key : headers . entrySet ( ) ) { request . addHeader ( key . getKey ( ) , key . getValue ( ) ) ; }
public class DistributedQueue { /** * Wait until any pending puts are committed * @ param waitTime max wait time * @ param timeUnit time unit * @ return true if the flush was successful , false if it timed out first * @ throws InterruptedException if thread was interrupted */ @ Override public boolean flushPuts ( long waitTime , TimeUnit timeUnit ) throws InterruptedException { } }
long msWaitRemaining = TimeUnit . MILLISECONDS . convert ( waitTime , timeUnit ) ; synchronized ( putCount ) { while ( putCount . get ( ) > 0 ) { if ( msWaitRemaining <= 0 ) { return false ; } long startMs = System . currentTimeMillis ( ) ; putCount . wait ( msWaitRemaining ) ; long elapsedMs = System . currentTimeMillis ( ) - startMs ; msWaitRemaining -= elapsedMs ; } } return true ;
public class CiphererWithStaticKeyImpl { /** * Encrypts / decrypts a message based on the underlying mode of operation . * @ param message if in encryption mode , the clear - text message , otherwise * the message to decrypt * @ return if in encryption mode , the encrypted message , otherwise the * decrypted message * @ throws SymmetricEncryptionException on runtime errors * @ see # setMode ( Mode ) */ public byte [ ] encrypt ( byte [ ] message ) { } }
try { final Cipher cipher = ( ( ( provider == null ) || ( provider . length ( ) == 0 ) ) ? Cipher . getInstance ( cipherAlgorithm ) : Cipher . getInstance ( cipherAlgorithm , provider ) ) ; switch ( mode ) { case ENCRYPT : cipher . init ( Cipher . ENCRYPT_MODE , keySpec , initializationVectorSpec ) ; break ; case DECRYPT : cipher . init ( Cipher . DECRYPT_MODE , keySpec , initializationVectorSpec ) ; break ; default : throw new SymmetricEncryptionException ( "error encrypting/decrypting message: invalid mode; mode=" + mode ) ; } return cipher . doFinal ( message ) ; } catch ( Exception e ) { throw new SymmetricEncryptionException ( "error encrypting/decrypting message; mode=" + mode , e ) ; }
public class CommerceTierPriceEntryUtil { /** * Returns the last commerce tier price entry in the ordered set where uuid = & # 63 ; . * @ param uuid the uuid * @ param orderByComparator the comparator to order the set by ( optionally < code > null < / code > ) * @ return the last matching commerce tier price entry , or < code > null < / code > if a matching commerce tier price entry could not be found */ public static CommerceTierPriceEntry fetchByUuid_Last ( String uuid , OrderByComparator < CommerceTierPriceEntry > orderByComparator ) { } }
return getPersistence ( ) . fetchByUuid_Last ( uuid , orderByComparator ) ;
public class QueryMaker {
    /**
     * Folds a sub-query into the given bool query, wrapping it as a nested or
     * has-child query when the WHERE condition requires it.
     *
     * @param boolQuery the parent bool query being assembled
     * @param where the WHERE clause element driving nesting and AND/OR placement
     * @param subQuery the already-built query for this clause
     */
    private void addSubQuery(BoolQueryBuilder boolQuery, Where where, QueryBuilder subQuery) {
        if (where instanceof Condition) {
            Condition condition = (Condition) where;
            if (condition.isNested()) {
                boolean isNestedQuery = subQuery instanceof NestedQueryBuilder;
                InnerHitBuilder ihb = null;
                if (condition.getInnerHits() != null) {
                    // parse the user-supplied inner_hits JSON fragment
                    try (JsonXContentParser parser = new JsonXContentParser(
                            NamedXContentRegistry.EMPTY, LoggingDeprecationHandler.INSTANCE,
                            new JsonFactory().createParser(condition.getInnerHits()))) {
                        ihb = InnerHitBuilder.fromXContent(parser);
                    } catch (IOException e) {
                        throw new IllegalArgumentException("couldn't parse inner_hits: " + e.getMessage(), e);
                    }
                }
                // bugfix #628: "IS missing" / "= missing" on a nested field must become
                // a mustNot around the nested query, then short-circuit out
                if ("missing".equalsIgnoreCase(String.valueOf(condition.getValue()))
                        && (condition.getOpear() == Condition.OPEAR.IS || condition.getOpear() == Condition.OPEAR.EQ)) {
                    NestedQueryBuilder q = isNestedQuery ? (NestedQueryBuilder) subQuery
                            : QueryBuilders.nestedQuery(condition.getNestedPath(),
                                    QueryBuilders.boolQuery().mustNot(subQuery), ScoreMode.None);
                    if (ihb != null) {
                        q.innerHit(ihb);
                    }
                    boolQuery.mustNot(q);
                    return;
                }
                if (!isNestedQuery) {
                    subQuery = QueryBuilders.nestedQuery(condition.getNestedPath(), subQuery, ScoreMode.None);
                }
                if (ihb != null) {
                    ((NestedQueryBuilder) subQuery).innerHit(ihb);
                }
            } else if (condition.isChildren()) {
                subQuery = JoinQueryBuilders.hasChildQuery(condition.getChildType(), subQuery, ScoreMode.None);
            }
        }
        // attach subQuery to boolQuery: boolQuery is the parent level, subQuery the child level
        if (where.getConn() == CONN.AND) {
            boolQuery.must(subQuery);
        } else {
            boolQuery.should(subQuery);
        }
    }
}
public class WorkflowTriggerHistoriesInner { /** * Resubmits a workflow run based on the trigger history . * @ param resourceGroupName The resource group name . * @ param workflowName The workflow name . * @ param triggerName The workflow trigger name . * @ param historyName The workflow trigger history name . Corresponds to the run name for triggers that resulted in a run . * @ throws IllegalArgumentException thrown if parameters fail the validation * @ return the { @ link ServiceResponse } object if successful . */ public Observable < Void > resubmitAsync ( String resourceGroupName , String workflowName , String triggerName , String historyName ) { } }
return resubmitWithServiceResponseAsync ( resourceGroupName , workflowName , triggerName , historyName ) . map ( new Func1 < ServiceResponse < Void > , Void > ( ) { @ Override public Void call ( ServiceResponse < Void > response ) { return response . body ( ) ; } } ) ;
public class BaseLevel1 {
    /**
     * Scales a vector by a scalar in place (BLAS level-1 SCAL).
     *
     * @param N number of elements to scale
     * @param alpha the scalar multiplier
     * @param X the vector to scale in place
     */
    @Override
    public void scal(long N, double alpha, INDArray X) {
        // optional profiling hook before dispatching the BLAS call
        if (Nd4j.getExecutioner().getProfilingMode() == OpExecutioner.ProfilingMode.ALL)
            OpProfiler.getInstance().processBlasCall(false, X);
        if (X.isSparse()) {
            // sparse arrays go through the dedicated sparse BLAS wrapper
            Nd4j.getSparseBlasWrapper().level1().scal(N, alpha, X);
        } else if (X.data().dataType() == DataType.DOUBLE)
            dscal(N, alpha, X, BlasBufferUtil.getBlasStride(X));
        else if (X.data().dataType() == DataType.FLOAT)
            sscal(N, (float) alpha, X, BlasBufferUtil.getBlasStride(X));
        else if (X.data().dataType() == DataType.HALF)
            // no native half-precision SCAL; fall back to a generic scalar-multiply op
            Nd4j.getExecutioner().exec(new ScalarMultiplication(X, alpha));
        // NOTE(review): other dtypes (e.g. integer types) silently do nothing here — confirm intended
    }
}
public class Replication { /** * Triggers a replication request . */ public ReplicationResult trigger ( ) { } }
assertNotEmpty ( source , "Source" ) ; assertNotEmpty ( target , "Target" ) ; InputStream response = null ; try { JsonObject json = createJson ( ) ; if ( log . isLoggable ( Level . FINE ) ) { log . fine ( json . toString ( ) ) ; } final URI uri = new DatabaseURIHelper ( client . getBaseUri ( ) ) . path ( "_replicate" ) . build ( ) ; response = client . post ( uri , json . toString ( ) ) ; final InputStreamReader reader = new InputStreamReader ( response , "UTF-8" ) ; return client . getGson ( ) . fromJson ( reader , ReplicationResult . class ) ; } catch ( UnsupportedEncodingException e ) { // This should never happen as every implementation of the java platform is required // to support UTF - 8. throw new RuntimeException ( e ) ; } finally { close ( response ) ; }
public class AiMesh { /** * Returns the texture coordinates as n - dimensional vector . < p > * This method is part of the wrapped API ( see { @ link AiWrapperProvider } * for details on wrappers ) . < p > * The built - in behavior is to return a { @ link AiVector } . * @ param vertex the vertex index * @ param coords the texture coordinate set * @ param wrapperProvider the wrapper provider ( used for type inference ) * @ return the texture coordinates wrapped as object */ public < V3 , M4 , C , N , Q > V3 getWrappedTexCoords ( int vertex , int coords , AiWrapperProvider < V3 , M4 , C , N , Q > wrapperProvider ) { } }
if ( ! hasTexCoords ( coords ) ) { throw new IllegalStateException ( "mesh has no texture coordinate set " + coords ) ; } checkVertexIndexBounds ( vertex ) ; return wrapperProvider . wrapVector3f ( m_texcoords [ coords ] , vertex * 3 * SIZEOF_FLOAT , getNumUVComponents ( coords ) ) ;
public class HeadDocumentRepositoryMongoImpl { /** * { @ inheritDoc } */ @ Override public final HeadDocument findByRootAndString ( final RootDocument rootDocument , final String string ) { } }
final HeadDocument headDocument = findByFileAndString ( rootDocument . getFilename ( ) , string ) ; if ( headDocument == null ) { return null ; } final Head head = headDocument . getGedObject ( ) ; head . setParent ( rootDocument . getGedObject ( ) ) ; return headDocument ;
public class LogPattern { /** * Generate a log string . */ public StringBuilder generate ( Log log ) { } }
StringBuilder s = new StringBuilder ( ) ; for ( int i = 0 ; i < parts . length ; ++ i ) parts [ i ] . append ( s , log ) ; if ( log . trace != null ) appendStack ( s , log . trace ) ; return s ;
public class Strings { /** * Java - unescapes all the elements in the target set . * @ param target the list of Strings to be unescaped . * If non - String objects , toString ( ) will be called . * @ return a Set with the result of each * each element of the target . * @ since 2.0.11 */ public Set < String > setUnescapeJava ( final Set < ? > target ) { } }
if ( target == null ) { return null ; } final Set < String > result = new LinkedHashSet < String > ( target . size ( ) + 2 ) ; for ( final Object element : target ) { result . add ( unescapeJava ( element ) ) ; } return result ;
public class BusinessdayCalendarExcludingTARGETHolidays { /** * Test a given date for being easter sunday . * The method uses the algorithms sometimes cited as Meeus , Jones , Butcher Gregorian algorithm . * Taken from http : / / en . wikipedia . org / wiki / Computus * @ param date The date to check . * @ return True , if date is easter sunday . */ public static boolean isEasterSunday ( LocalDate date ) { } }
int y = date . getYear ( ) ; int a = y % 19 ; int b = y / 100 ; int c = y % 100 ; int d = b / 4 ; int e = b % 4 ; int f = ( b + 8 ) / 25 ; int g = ( b - f + 1 ) / 3 ; int h = ( 19 * a + b - d - g + 15 ) % 30 ; int i = c / 4 ; int k = c % 4 ; int l = ( 32 + 2 * e + 2 * i - h - k ) % 7 ; int m = ( a + 11 * h + 22 * l ) / 451 ; int easterSundayMonth = ( h + l - 7 * m + 114 ) / 31 ; int easterSundayDay = ( ( h + l - 7 * m + 114 ) % 31 ) + 1 ; int month = date . getMonthValue ( ) ; int day = date . getDayOfMonth ( ) ; return ( easterSundayMonth == month ) && ( easterSundayDay == day ) ;
public class ScopeDesktopWindowManager { /** * and unique within a single menu given either shortcutletter or pos */ public QuickMenuItem getQuickMenuItemByAction ( String action ) { } }
List < QuickMenuItem > itemList = getQuickMenuItemList ( ) ; for ( QuickMenuItem item : itemList ) { if ( item . getActionName ( ) . equals ( action ) ) { return item ; } } return null ;
public class ApproximateHistogram {
    /**
     * Writes a compact byte-buffer representation of this ApproximateHistogram,
     * storing actual values as opposed to histogram bins.
     * Requires 3 + 4 * count bytes of storage with count &lt;= 127.
     *
     * @param buf ByteBuffer to write the ApproximateHistogram to
     */
    public void toBytesCompact(ByteBuffer buf) {
        Preconditions.checkState(canStoreCompact(), "Approximate histogram cannot be stored in compact form");
        // use negative size to indicate compact storage
        buf.putShort((short) (-1 * size));
        final long exactCount = getExactCount();
        if (exactCount != count) {
            // use negative count to indicate approximate bins
            buf.put((byte) (-1 * (count - exactCount)));
            // store actual values instead of bins
            for (int i = 0; i < binCount; ++i) {
                // repeat each value bins[i] times for approximate bins
                if ((bins[i] & APPROX_FLAG_BIT) != 0) {
                    for (int k = 0; k < (bins[i] & COUNT_BITS); ++k) {
                        buf.putFloat(positions[i]);
                    }
                }
            }
            // tack on min and max since they may be lost in the approximate bins
            buf.putFloat(min);
            buf.putFloat(max);
        }
        buf.put((byte) exactCount);
        // store actual values instead of bins
        for (int i = 0; i < binCount; ++i) {
            // repeat each value bins[i] times for exact bins
            if ((bins[i] & APPROX_FLAG_BIT) == 0) {
                for (int k = 0; k < (bins[i] & COUNT_BITS); ++k) {
                    buf.putFloat(positions[i]);
                }
            }
        }
    }
}
public class CommerceShipmentItemLocalServiceBaseImpl { /** * Performs a dynamic query on the database and returns a range of the matching rows . * Useful when paginating results . Returns a maximum of < code > end - start < / code > instances . < code > start < / code > and < code > end < / code > are not primary keys , they are indexes in the result set . Thus , < code > 0 < / code > refers to the first result in the set . Setting both < code > start < / code > and < code > end < / code > to { @ link com . liferay . portal . kernel . dao . orm . QueryUtil # ALL _ POS } will return the full result set . If < code > orderByComparator < / code > is specified , then the query will include the given ORDER BY logic . If < code > orderByComparator < / code > is absent and pagination is required ( < code > start < / code > and < code > end < / code > are not { @ link com . liferay . portal . kernel . dao . orm . QueryUtil # ALL _ POS } ) , then the query will include the default ORDER BY logic from { @ link com . liferay . commerce . model . impl . CommerceShipmentItemModelImpl } . If both < code > orderByComparator < / code > and pagination are absent , for performance reasons , the query will not have an ORDER BY clause and the returned result set will be sorted on by the primary key in an ascending order . * @ param dynamicQuery the dynamic query * @ param start the lower bound of the range of model instances * @ param end the upper bound of the range of model instances ( not inclusive ) * @ return the range of matching rows */ @ Override public < T > List < T > dynamicQuery ( DynamicQuery dynamicQuery , int start , int end ) { } }
return commerceShipmentItemPersistence . findWithDynamicQuery ( dynamicQuery , start , end ) ;
public class PersistenceDelegator {
    /**
     * Writes an entity into the persistence cache; the actual database write
     * happens during flush.
     *
     * @param e the entity to persist; must not be {@code null}
     * @throws IllegalArgumentException when {@code e} is {@code null}
     */
    public void persist(Object e) {
        if (e == null) {
            throw new IllegalArgumentException(
                    "Entity object is invalid, operation failed. Please check previous log message for details");
        }
        // Create an object graph of the entity object.
        ObjectGraph graph = new GraphGenerator().generateGraph(e, this);
        // Call persist on each node in object graph.
        Node node = graph.getHeadNode();
        try {
            // Get write lock before writing object; required for transaction.
            lock.writeLock().lock();
            node.setPersistenceDelegator(this);
            node.persist();
            // Build flush stack.
            flushManager.buildFlushStack(node,
                    com.impetus.kundera.persistence.context.EventLog.EventType.INSERT);
            // Flushing data.
            flush();
            // Add node to persistence context after successful flush.
            getPersistenceCache().getMainCache().addHeadNode(node);
        } finally {
            // Always release the write lock, even when persist/flush throws.
            lock.writeLock().unlock();
        }
        // Drop the graph so it can be collected.
        graph.clear();
        graph = null;
        if (log.isDebugEnabled()) {
            log.debug("Data persisted successfully for entity {}.", e.getClass());
        }
    }
}
public class Waiter { /** * Compares Activity classes . * @ param activityClass the Activity class to compare * @ param currentActivity the Activity that is currently active * @ return true if Activity classes match */ private boolean isActivityMatching ( Class < ? extends Activity > activityClass , Activity currentActivity ) { } }
if ( currentActivity != null && currentActivity . getClass ( ) . equals ( activityClass ) ) { return true ; } return false ;
public class JMXExtension { /** * { @ inheritDoc } */ @ Override public void initializeParsers ( ExtensionParsingContext context ) { } }
context . setSubsystemXmlMapping ( SUBSYSTEM_NAME , Namespace . JMX_1_0 . getUriString ( ) , JMXSubsystemParser_1_0 :: new ) ; context . setSubsystemXmlMapping ( SUBSYSTEM_NAME , Namespace . JMX_1_1 . getUriString ( ) , JMXSubsystemParser_1_1 :: new ) ; context . setSubsystemXmlMapping ( SUBSYSTEM_NAME , Namespace . JMX_1_2 . getUriString ( ) , JMXSubsystemParser_1_2 :: new ) ; context . setSubsystemXmlMapping ( SUBSYSTEM_NAME , Namespace . JMX_1_3 . getUriString ( ) , JMXSubsystemParser_1_3 :: new ) ;
public class MessageFlush { /** * Deliver the message * @ param os the physical output stream * @ param writerHttp the writer context */ @ Override public void deliver ( WriteStream os , OutHttp2 outHttp ) throws IOException { } }
try { os . flush ( ) ; } finally { _future . ok ( null ) ; }
public class FctBnTradeProcessors { /** * < p > Lazy initialize retrievers . < / p > * @ param pAddParam additional param * @ throws Exception - an exception */ protected final void lazyInitRetrievers ( final Map < String , Object > pAddParam ) throws Exception { } }
if ( this . retrievers == null ) { this . retrievers = new HashMap < String , ICsvDataRetriever > ( ) ; GoodsPriceListRetriever < RS > gpr = new GoodsPriceListRetriever < RS > ( ) ; gpr . setSrvI18n ( getSrvI18n ( ) ) ; gpr . setSrvOrm ( getSrvOrm ( ) ) ; gpr . setSrvDatabase ( getSrvDatabase ( ) ) ; gpr . setSrvAccSettings ( getSrvAccSettings ( ) ) ; this . retrievers . put ( "GoodsPriceListRetriever" , gpr ) ; ServicePriceListRetriever < RS > spr = new ServicePriceListRetriever < RS > ( ) ; spr . setSrvI18n ( getSrvI18n ( ) ) ; spr . setSrvOrm ( getSrvOrm ( ) ) ; spr . setSrvAccSettings ( getSrvAccSettings ( ) ) ; this . retrievers . put ( "ServicePriceListRetriever" , spr ) ; }
public class ArrayWrappedCallByReference {
    /**
     * Processes a method call looking for parameters that are arrays. If such an
     * array was seen earlier as a simple wrapping array, it is marked as having
     * been used as a parameter.
     */
    private void processMethodCall() {
        // reflective Method.invoke calls are ignored entirely
        if ("invoke".equals(getNameConstantOperand()) && "java/lang/reflect/Method".equals(getClassConstantOperand())) {
            return;
        }
        String sig = getSigConstantOperand();
        List<String> args = SignatureUtils.getParameterSignatures(sig);
        if (stack.getStackDepth() >= args.size()) {
            for (int i = 0; i < args.size(); i++) {
                String argSig = args.get(i);
                if (argSig.startsWith(Values.SIG_ARRAY_PREFIX)) {
                    // stack items are addressed from the top, so argument i sits at
                    // depth (argCount - i - 1)
                    OpcodeStack.Item itm = stack.getStackItem(args.size() - i - 1);
                    int arrayReg = itm.getRegisterNumber();
                    WrapperInfo wi = wrappers.get(Integer.valueOf(arrayReg));
                    if (wi != null) {
                        // known wrapper array escaped into a call — remember that
                        wi.wasArg = true;
                    }
                }
            }
        }
    }
}
public class DependencyAnalyzer { /** * Checks the dependencies for a package from the " forbidden " section . * @ param dependencies * Dependency definition to use . * @ param forbiddenPkg * Package with forbidden imports . * @ param classInfo * Information extracted from the class . * @ return List of errors - may be empty but is never < code > null < / code > . */ private static List < DependencyError > checkForbiddenSection ( final Dependencies dependencies , final Package < NotDependsOn > forbiddenPkg , final ClassInfo classInfo ) { } }
final List < DependencyError > errors = new ArrayList < DependencyError > ( ) ; final Iterator < String > it = classInfo . getImports ( ) . iterator ( ) ; while ( it . hasNext ( ) ) { final String importedPkg = it . next ( ) ; if ( ! importedPkg . equals ( classInfo . getPackageName ( ) ) ) { final NotDependsOn ndo = Utils . findForbiddenByName ( dependencies . getAlwaysForbidden ( ) , importedPkg ) ; if ( ndo != null ) { errors . add ( new DependencyError ( classInfo . getName ( ) , importedPkg , ndo . getComment ( ) ) ) ; } else { final NotDependsOn dep = Utils . findForbiddenByName ( forbiddenPkg . getDependencies ( ) , importedPkg ) ; if ( dep != null ) { final String comment ; if ( dep . getComment ( ) == null ) { comment = forbiddenPkg . getComment ( ) ; } else { comment = dep . getComment ( ) ; } errors . add ( new DependencyError ( classInfo . getName ( ) , importedPkg , comment ) ) ; } } } } return errors ;
public class InstanceConverterFactory { /** * Gets an appropriate { @ link PropositionConverter } for constructing a * PROTEMPA proposition definition from the given Protege proposition * instance . * @ param proposition a Protege proposition { @ link Proposition } instance . * @ return an appropriate { @ link PropositionConverter } object , or * < code > null < / code > if the given * < code > proposition < / code > is * < code > null < / code > . * @ throws AssertionError if the given * < code > proposition < / code > does not have a type in the Protege * < code > Proposition < / code > class hierarchy . */ PropositionConverter getInstance ( Instance proposition ) throws KnowledgeSourceReadException { } }
if ( proposition == null ) { return null ; } else { if ( this . converterMap == null ) { populateConverterMap ( ) ; } Collection < Cls > types = ( Collection < Cls > ) proposition . getDirectTypes ( ) ; for ( Cls cls : types ) { PropositionConverter pc = this . converterMap . get ( cls ) ; if ( pc != null ) { return pc ; } } return null ; }
public class BootstrapHandlers { /** * Remove a configuration given its unique name from the given { @ link * ServerBootstrap } * @ param b a server bootstrap * @ param name a configuration name */ public static ServerBootstrap removeConfiguration ( ServerBootstrap b , String name ) { } }
Objects . requireNonNull ( b , "bootstrap" ) ; Objects . requireNonNull ( name , "name" ) ; if ( b . config ( ) . childHandler ( ) != null ) { b . childHandler ( removeConfiguration ( b . config ( ) . childHandler ( ) , name ) ) ; } return b ;
public class CommonOps_DDRM { /** * Returns the absolute value of the element in the matrix that has the smallest absolute value . < br > * < br > * Min { | a < sub > ij < / sub > | } for all i and j < br > * @ param a A matrix . Not modified . * @ return The max element value of the matrix . */ public static double elementMinAbs ( DMatrixD1 a ) { } }
final int size = a . getNumElements ( ) ; double min = Double . MAX_VALUE ; for ( int i = 0 ; i < size ; i ++ ) { double val = Math . abs ( a . get ( i ) ) ; if ( val < min ) { min = val ; } } return min ;
public class DateUtils {
    /**
     * Given a string that may be a date or a date range, extract an interval of
     * dates from that date range, up to the end millisecond of the last day.
     *
     * @see DateUtils#extractDateInterval(String) which returns a pair of DateMidnights.
     * @param eventDate a string containing a dwc:eventDate from which to extract an interval
     * @return an interval from the beginning of the event date to the end of the event
     *         date, or {@code null} when the input cannot be parsed
     */
    public static Interval extractInterval(String eventDate) {
        Interval result = null;
        // accept yyyy, yyyy-MM, or full ISO date(-time) forms
        DateTimeParser[] parsers = {
                DateTimeFormat.forPattern("yyyy-MM").getParser(),
                DateTimeFormat.forPattern("yyyy").getParser(),
                ISODateTimeFormat.dateOptionalTimeParser().getParser() };
        DateTimeFormatter formatter = new DateTimeFormatterBuilder().append(null, parsers).toFormatter();
        if (eventDate != null && eventDate.contains("/") && isRange(eventDate)) {
            // explicit start/end range separated by '/'
            String[] dateBits = eventDate.split("/");
            try {
                // must be at least a 4 digit year.
                if (dateBits[0].length() > 3 && dateBits[1].length() > 3) {
                    DateMidnight startDate = DateMidnight.parse(dateBits[0], formatter);
                    DateTime endDate = DateTime.parse(dateBits[1], formatter);
                    logger.debug(startDate);
                    logger.debug(endDate);
                    // widen the end to the last millisecond of its year/month/day,
                    // depending on the end token's precision
                    if (dateBits[1].length() == 4) {
                        result = new Interval(startDate, endDate.plusMonths(12).minus(1l));
                    } else if (dateBits[1].length() == 7) {
                        result = new Interval(startDate, endDate.plusMonths(1).minus(1l));
                    } else {
                        result = new Interval(startDate, endDate.plusDays(1).minus(1l));
                    }
                    logger.debug(result);
                }
            } catch (Exception e) {
                // not a date range
                logger.error(e.getMessage());
            }
        } else {
            try {
                DateMidnight startDate = DateMidnight.parse(eventDate, formatter);
                logger.debug(startDate);
                // single date: interval covers the whole year, month, or day,
                // depending on the token's precision
                if (eventDate.length() == 4) {
                    DateTime endDate = startDate.toDateTime().plusMonths(12).minus(1l);
                    result = new Interval(startDate, endDate);
                    logger.debug(result);
                } else if (eventDate.length() == 7) {
                    DateTime endDate = startDate.toDateTime().plusMonths(1).minus(1l);
                    result = new Interval(startDate, endDate);
                    logger.debug(result);
                } else {
                    DateTime endDate = startDate.toDateTime().plusDays(1).minus(1l);
                    result = new Interval(startDate, endDate);
                    logger.debug(result);
                }
            } catch (Exception e) {
                // not a date
                logger.error(e.getMessage());
            }
        }
        return result;
    }
}
public class nd6ravariables_binding { /** * Use this API to fetch nd6ravariables _ binding resource of given name . */ public static nd6ravariables_binding get ( nitro_service service , Long vlan ) throws Exception { } }
nd6ravariables_binding obj = new nd6ravariables_binding ( ) ; obj . set_vlan ( vlan ) ; nd6ravariables_binding response = ( nd6ravariables_binding ) obj . get_resource ( service ) ; return response ;
public class Proxy { /** * This method identifies whether we are able to close this proxy * object . If this object represents a session object , then this * can only be closed if we have not been closed and if the connection * has not been closed . If it represents a connection , then we * can only close if we have not already been closed . * @ return Returns true if we have already been closed or if the * underlying connection has been closed . */ public boolean isClosed ( ) { } }
if ( TraceComponent . isAnyTracingEnabled ( ) && tc . isEntryEnabled ( ) ) SibTr . entry ( this , tc , "isClosed" ) ; boolean retValue = false ; if ( connectionProxy == null ) { retValue = closed ; } else { retValue = closed || connectionProxy . isClosed ( ) ; } if ( TraceComponent . isAnyTracingEnabled ( ) && tc . isEntryEnabled ( ) ) SibTr . exit ( this , tc , "isClosed" , "" + retValue ) ; return retValue ;
public class VpnSitesInner { /** * Creates a VpnSite resource if it doesn ' t exist else updates the existing VpnSite . * @ param resourceGroupName The resource group name of the VpnSite . * @ param vpnSiteName The name of the VpnSite being created or updated . * @ param vpnSiteParameters Parameters supplied to create or update VpnSite . * @ throws IllegalArgumentException thrown if parameters fail the validation * @ return the observable for the request */ public Observable < ServiceResponse < VpnSiteInner > > createOrUpdateWithServiceResponseAsync ( String resourceGroupName , String vpnSiteName , VpnSiteInner vpnSiteParameters ) { } }
if ( this . client . subscriptionId ( ) == null ) { throw new IllegalArgumentException ( "Parameter this.client.subscriptionId() is required and cannot be null." ) ; } if ( resourceGroupName == null ) { throw new IllegalArgumentException ( "Parameter resourceGroupName is required and cannot be null." ) ; } if ( vpnSiteName == null ) { throw new IllegalArgumentException ( "Parameter vpnSiteName is required and cannot be null." ) ; } if ( vpnSiteParameters == null ) { throw new IllegalArgumentException ( "Parameter vpnSiteParameters is required and cannot be null." ) ; } Validator . validate ( vpnSiteParameters ) ; final String apiVersion = "2018-06-01" ; Observable < Response < ResponseBody > > observable = service . createOrUpdate ( this . client . subscriptionId ( ) , resourceGroupName , vpnSiteName , apiVersion , vpnSiteParameters , this . client . acceptLanguage ( ) , this . client . userAgent ( ) ) ; return client . getAzureClient ( ) . getPutOrPatchResultAsync ( observable , new TypeToken < VpnSiteInner > ( ) { } . getType ( ) ) ;
public class WebAppFilterManager {
    /**
     * Returns a FilterInstanceWrapper corresponding to the passed-in filter name.
     * If the filter instance has previously been created, that instance is
     * returned; otherwise a new filter instance is created.
     *
     * @param filterName the name of the filter instance to find or create
     * @return a FilterInstanceWrapper corresponding to the passed-in filter name
     * @throws ServletException if the filter cannot be loaded
     */
    public FilterInstanceWrapper getFilterInstanceWrapper(String filterName) throws ServletException {
        if (com.ibm.ejs.ras.TraceComponent.isAnyTracingEnabled() && logger.isLoggable(Level.FINE))
            logger.entering(CLASS_NAME, "getFilterInstanceWrapper", "entry for " + filterName);
        try {
            FilterInstanceWrapper filterInstW;
            // see if the filter is already loaded
            filterInstW = (FilterInstanceWrapper) (_filterWrappers.get(filterName));
            if (filterInstW == null) {
                // PM01682 Start — double-checked under the per-filter config lock
                synchronized (webAppConfig.getFilterInfo(filterName)) {
                    // others may have been waiting on the lock; re-check before loading
                    filterInstW = (FilterInstanceWrapper) (_filterWrappers.get(filterName));
                    if (filterInstW == null) {
                        // filter not loaded yet... create an instance wrapper
                        filterInstW = loadFilter(filterName);
                    }
                } // PM01682 End
            }
            if (com.ibm.ejs.ras.TraceComponent.isAnyTracingEnabled() && logger.isLoggable(Level.FINE)) {
                logger.exiting(CLASS_NAME, "getFilterInstanceWrapper", "exit for " + filterName);
            }
            return filterInstW;
        } catch (ServletException e) {
            // record the failure via FFDC and rethrow as-is
            com.ibm.wsspi.webcontainer.util.FFDCWrapper.processException(e,
                    "com.ibm.ws.webcontainer.filter.WebAppFilterManager.getFilterInstanceWrapper", "166", this);
            throw e;
        } catch (Throwable th) {
            // record the failure via FFDC and wrap anything else in a ServletException
            com.ibm.wsspi.webcontainer.util.FFDCWrapper.processException(th,
                    "com.ibm.ws.webcontainer.filter.WebAppFilterManager.getFilterInstanceWrapper", "172", this);
            throw new ServletException(MessageFormat.format(
                    "Filter [{0}]: could not be loaded", new Object[] { filterName }), th);
        }
    }
}
public class AuthUtils { /** * Get all of the configured Credential Renwer Plugins . * @ param storm _ conf the storm configuration to use . * @ return the configured credential renewers . */ public static Collection < ICredentialsRenewer > GetCredentialRenewers ( Map conf ) { } }
try { Set < ICredentialsRenewer > ret = new HashSet < ICredentialsRenewer > ( ) ; Collection < String > clazzes = ( Collection < String > ) conf . get ( Config . NIMBUS_CREDENTIAL_RENEWERS ) ; if ( clazzes != null ) { for ( String clazz : clazzes ) { ICredentialsRenewer inst = ( ICredentialsRenewer ) Class . forName ( clazz ) . newInstance ( ) ; inst . prepare ( conf ) ; ret . add ( inst ) ; } } return ret ; } catch ( Exception e ) { throw new RuntimeException ( e ) ; }
public class ListLaunchPathsRequestMarshaller { /** * Marshall the given parameter object . */ public void marshall ( ListLaunchPathsRequest listLaunchPathsRequest , ProtocolMarshaller protocolMarshaller ) { } }
if ( listLaunchPathsRequest == null ) { throw new SdkClientException ( "Invalid argument passed to marshall(...)" ) ; } try { protocolMarshaller . marshall ( listLaunchPathsRequest . getAcceptLanguage ( ) , ACCEPTLANGUAGE_BINDING ) ; protocolMarshaller . marshall ( listLaunchPathsRequest . getProductId ( ) , PRODUCTID_BINDING ) ; protocolMarshaller . marshall ( listLaunchPathsRequest . getPageSize ( ) , PAGESIZE_BINDING ) ; protocolMarshaller . marshall ( listLaunchPathsRequest . getPageToken ( ) , PAGETOKEN_BINDING ) ; } catch ( Exception e ) { throw new SdkClientException ( "Unable to marshall request to JSON: " + e . getMessage ( ) , e ) ; }
public class JSONAssert { /** * Asserts that the JSONArray provided matches the expected string . If it isn ' t it throws an * { @ link AssertionError } . * @ param message Error message to be displayed in case of assertion failure * @ param expectedStr Expected JSON string * @ param actualStr String to compare * @ param strict Enables strict checking * @ throws JSONException JSON parsing error */ public static void assertEquals ( String message , String expectedStr , String actualStr , boolean strict ) throws JSONException { } }
assertEquals ( message , expectedStr , actualStr , strict ? JSONCompareMode . STRICT : JSONCompareMode . LENIENT ) ;
public class UnionPayApi {
    /**
     * File transfer interface: posts the request parameters to the configured
     * file-transfer endpoint.
     *
     * @param reqData request parameters to send
     * @return the raw response body returned by the endpoint
     */
    public static String fileTransfer(Map<String, String> reqData) {
        // Endpoint URL is taken from the global SDK configuration.
        return HttpUtils.post(SDKConfig.getConfig().getFileTransUrl(), reqData);
    }
}
public class MoleculeSignature { /** * Builder for molecules ( rather , for atom containers ) from signature * strings . * @ param signatureString the signature string to use * @ param coBuilder { @ link IChemObjectBuilder } to build the returned atom container from * @ return an atom container */ public static IAtomContainer fromSignatureString ( String signatureString , IChemObjectBuilder coBuilder ) { } }
ColoredTree tree = AtomSignature . parse ( signatureString ) ; MoleculeFromSignatureBuilder builder = new MoleculeFromSignatureBuilder ( coBuilder ) ; builder . makeFromColoredTree ( tree ) ; return builder . getAtomContainer ( ) ;
public class SuggestedFixes {
    /**
     * Removes the requested modifiers from the given modifiers tree, producing a fix
     * that deletes each matching modifier token (plus the character following it,
     * which is assumed to be the separating whitespace).
     *
     * @param originalModifiers the modifiers tree to edit
     * @param state             visitor state used to re-tokenize the modifiers
     * @param toRemove          the set of modifiers to strip
     * @return a fix removing the modifiers, or empty if none of them were present
     */
    public static Optional<SuggestedFix> removeModifiers(ModifiersTree originalModifiers, VisitorState state, Set<Modifier> toRemove) {
        SuggestedFix.Builder fix = SuggestedFix.builder();
        List<ErrorProneToken> tokens = state.getTokensForNode(originalModifiers);
        // Token positions are relative to the modifiers tree; offset by its start position.
        int basePos = ((JCTree) originalModifiers).getStartPosition();
        boolean empty = true;
        for (ErrorProneToken tok : tokens) {
            Modifier mod = getTokModifierKind(tok);
            if (toRemove.contains(mod)) {
                empty = false;
                // endPos() + 1 also consumes the trailing separator character.
                fix.replace(basePos + tok.pos(), basePos + tok.endPos() + 1, "");
            }
        }
        if (empty) {
            // Nothing matched: signal "no fix needed" rather than an empty fix.
            return Optional.empty();
        }
        return Optional.of(fix.build());
    }
}
public class Watch {
    /**
     * Helper to clear the docs on RESET or filter mismatch.
     */
    private void resetDocs() {
        changeMap.clear();
        resumeToken = null;
        for (DocumentSnapshot snapshot : documentSet) {
            // Mark each document as deleted. If documents are not deleted, they will be sent again by
            // the server.
            changeMap.put(snapshot.getReference().getResourcePath(), null);
        }
        // We no longer consider the local view current until the server says so again.
        current = false;
    }
}
public class HtmlDocletWriter {
    /**
     * Adds the user specified top content to the fixed navigation area.
     *
     * @param htmlTree the content tree to which user specified top will be added
     */
    public void addTop(Content htmlTree) {
        // The -top text may contain {@docRoot}; expand it before emitting raw HTML.
        Content top = new RawHtml(replaceDocRootDir(configuration.top));
        fixedNavDiv.addContent(top);
    }
}
public class ModelsImpl {
    /**
     * Get all entity roles for a given entity.
     *
     * @param appId     the application ID
     * @param versionId the version ID
     * @param entityId  the entity ID
     * @throws IllegalArgumentException thrown if parameters fail the validation
     * @throws ErrorResponseException   thrown if the request is rejected by server
     * @throws RuntimeException         all other wrapped checked exceptions if the request fails to be sent
     * @return the List&lt;EntityRole&gt; object if successful.
     */
    public List<EntityRole> getClosedListEntityRoles(UUID appId, String versionId, UUID entityId) {
        // Blocking wrapper around the async service call; unwraps the single response body.
        return getClosedListEntityRolesWithServiceResponseAsync(appId, versionId, entityId).toBlocking().single().body();
    }
}
public class TextApplier {
    /**
     * Applies CSS text styling (font style/size/weight/family, color, underline) from
     * the given style map onto the cell's style. A font object is created lazily only
     * when at least one text property is present.
     *
     * @param cell      the cell whose workbook supplies fonts and colors
     * @param cellStyle the cell style that receives the configured font
     * @param style     CSS property name → value map
     */
    public void apply(HSSFCell cell, HSSFCellStyle cellStyle, Map<String, String> style) {
        HSSFWorkbook workBook = cell.getSheet().getWorkbook();
        // Lazily-created font; getFont(cell, font) returns the existing one once created.
        HSSFFont font = null;
        if (ITALIC.equals(style.get(FONT_STYLE))) {
            font = getFont(cell, font);
            font.setItalic(true);
        }
        int fontSize = CssUtils.getInt(style.get(FONT_SIZE));
        if (fontSize > 0) {
            font = getFont(cell, font);
            font.setFontHeightInPoints((short) fontSize);
        }
        if (BOLD.equals(style.get(FONT_WEIGHT))) {
            font = getFont(cell, font);
            font.setBoldweight(Font.BOLDWEIGHT_BOLD);
        }
        String fontFamily = style.get(FONT_FAMILY);
        if (StringUtils.isNotBlank(fontFamily)) {
            font = getFont(cell, font);
            font.setFontName(fontFamily);
        }
        HSSFColor color = CssUtils.parseColor(workBook, style.get(COLOR));
        if (color != null) {
            if (color.getIndex() != BLACK.index) {
                font = getFont(cell, font);
                font.setColor(color.getIndex());
            } else {
                // Black (the default) is dropped from the map and not applied.
                log.info("Text Color [{}] Is Black Or Fimiliar To Black, Ignore.", style.remove(COLOR));
            }
        }
        // text-decoration
        String textDecoration = style.get(TEXT_DECORATION);
        if (UNDERLINE.equals(textDecoration)) {
            font = getFont(cell, font);
            font.setUnderline(Font.U_SINGLE);
        }
        // Only attach a font if some text property actually required one.
        if (font != null) {
            cellStyle.setFont(font);
        }
    }
}
public class GrowingSparseMatrix { /** * { @ inheritDoc } */ public void setColumn ( int column , double [ ] values ) { } }
for ( int row = 0 ; row < rows ( ) ; ++ row ) set ( row , column , values [ row ] ) ;
public class Models {
    /**
     * Fetch all the Frames so we can see if they are compatible with our Model(s).
     * Both maps are null when compatible-frame lookup is disabled.
     *
     * @return a pair of (frame name → Frame, frame name → set of column names)
     */
    private Pair<Map<String, Frame>, Map<String, Set<String>>> fetchFrames() {
        Map<String, Frame> all_frames = null;
        Map<String, Set<String>> all_frames_cols = null;
        if (this.find_compatible_frames) {
            // caches for this request
            all_frames = Frames.fetchAll();
            all_frames_cols = new TreeMap<String, Set<String>>();
            // Precompute each frame's column-name set for fast compatibility checks.
            for (Map.Entry<String, Frame> entry : all_frames.entrySet()) {
                all_frames_cols.put(entry.getKey(), new TreeSet<String>(Arrays.asList(entry.getValue()._names)));
            }
        }
        return new Pair<Map<String, Frame>, Map<String, Set<String>>>(all_frames, all_frames_cols);
    }
}
public class CssHelper { /** * Binds the < code > css < / code > statement . */ public static ChainableStatement css ( String name , String value ) { } }
return new DefaultChainableStatement ( "css" , JsUtils . quotes ( name ) , JsUtils . quotes ( value ) ) ;
public class SVGUtil {
    /**
     * Set a SVG attribute from a double value.
     *
     * @param el   element to modify
     * @param name attribute name
     * @param d    double value, formatted via {@code fmt} before being set
     */
    public static void setAtt(Element el, String name, double d) {
        el.setAttribute(name, fmt(d));
    }
}
public class AmazonKinesisAsyncClient {
    /**
     * Simplified method form for invoking the SplitShard operation with an AsyncHandler.
     * Builds the request object from the individual parameters and delegates to the
     * request-based overload.
     *
     * @see #splitShardAsync(SplitShardRequest, com.amazonaws.handlers.AsyncHandler)
     */
    @Override
    public java.util.concurrent.Future<SplitShardResult> splitShardAsync(String streamName, String shardToSplit, String newStartingHashKey,
            com.amazonaws.handlers.AsyncHandler<SplitShardRequest, SplitShardResult> asyncHandler) {
        return splitShardAsync(new SplitShardRequest().withStreamName(streamName).withShardToSplit(shardToSplit).withNewStartingHashKey(newStartingHashKey),
                asyncHandler);
    }
}
public class StreamBuilderImpl {
    /**
     * Reduce with an identity value, an accumulator and a combiner.
     * Wraps this stream in a synchronous reduce-accumulate stage.
     */
    @Override
    public <R> StreamBuilderImpl<R, U> reduce(R identity, BiFunctionSync<R, ? super T, R> accumulator, BinaryOperatorSync<R> combiner) {
        return new ReduceAccumSync<>(this, identity, accumulator, combiner);
    }
}
public class CompileTask {
    /**
     * Converts {@code <entrypoint/>} nested elements into Compiler entrypoint
     * replacements. Entry points are only applied when dependency management is enabled.
     */
    private void convertEntryPointParameters(CompilerOptions options) {
        ImmutableList.Builder<ModuleIdentifier> entryPointsBuilder = ImmutableList.builder();
        for (Parameter p : entryPointParams) {
            // Each <entrypoint name="..."/> becomes a Closure module identifier.
            entryPointsBuilder.add(ModuleIdentifier.forClosure(p.getName()));
        }
        if (this.manageDependencies) {
            options.setDependencyOptions(DependencyOptions.pruneLegacyForEntryPoints(entryPointsBuilder.build()));
        }
    }
}
public class SdpFactory {
    /**
     * Rejects a media description from an SDP offer by echoing it into the answer
     * with port 0 (the SDP convention for a rejected media line).
     *
     * @param answer the SDP answer to include the rejected media
     * @param media  the offered media description to be rejected
     */
    public static void rejectMediaField(SessionDescription answer, MediaDescriptionField media) {
        MediaDescriptionField rejected = new MediaDescriptionField();
        rejected.setMedia(media.getMedia());
        // Port 0 marks the media stream as rejected.
        rejected.setPort(0);
        rejected.setProtocol(media.getProtocol());
        rejected.setPayloadTypes(media.getPayloadTypes());
        rejected.setSession(answer);
        answer.addMediaDescription(rejected);
    }
}
public class VirtualFileSystem {
    /**
     * Returns {@linkplain OutputStream} to write the contents of the specified file from the beginning.
     * Writing to the returned stream doesn't change the {@linkplain File}'s last modified time.
     *
     * @param txn            {@linkplain Transaction} instance
     * @param fileDescriptor file descriptor
     * @return {@linkplain OutputStream} to write the contents of the specified file from the beginning
     * @see #readFile(Transaction, File)
     * @see #writeFile(Transaction, File)
     * @see #appendFile(Transaction, File)
     * @see File#getDescriptor()
     * @see File#getLastModified()
     */
    public OutputStream writeFile(@NotNull final Transaction txn, final long fileDescriptor) {
        // A null clustering iterator means "start writing from position 0".
        return new VfsOutputStream(this, txn, fileDescriptor, null);
    }
}
public class AmazonMTurkClient {
    /**
     * The <code>AssociateQualificationWithWorker</code> operation gives a Worker a Qualification
     * directly, without requiring the Worker to submit a Qualification request. Only Qualifications
     * of a type you created (via <code>CreateQualificationType</code>) can be assigned.
     *
     * @param request the AssociateQualificationWithWorker request
     * @return result of the AssociateQualificationWithWorker operation returned by the service
     * @throws ServiceException      Amazon Mechanical Turk is temporarily unable to process your request
     * @throws RequestErrorException your request is invalid
     */
    @Override
    public AssociateQualificationWithWorkerResult associateQualificationWithWorker(AssociateQualificationWithWorkerRequest request) {
        // Apply client-side pre-execution hooks (handlers, validation) before dispatch.
        request = beforeClientExecution(request);
        return executeAssociateQualificationWithWorker(request);
    }
}
public class Reflection { /** * Get current value represented as a string for the property on the supplied target object . * @ param target the target on which the setter is to be called ; may not be null * @ param property the property that is to be set on the target * @ return the current value for the property ; may be null * @ throws NoSuchMethodException if a matching method is not found . * @ throws SecurityException if access to the information is denied . * @ throws IllegalAccessException if the setter method could not be accessed * @ throws InvocationTargetException if there was an error invoking the setter method on the target * @ throws IllegalArgumentException if ' target ' is null , ' property ' is null , or ' property . getName ( ) ' is null */ public String getPropertyAsString ( Object target , Property property ) throws SecurityException , IllegalArgumentException , NoSuchMethodException , IllegalAccessException , InvocationTargetException { } }
Object value = getProperty ( target , property ) ; StringBuilder sb = new StringBuilder ( ) ; writeObjectAsString ( value , sb , false ) ; return sb . toString ( ) ;
public class Base64 { /** * Returns whether or not the < code > octet < / code > is in the base 64 alphabet . * @ param octet * The value to test * @ return < code > true < / code > if the value is defined in the the base 64 alphabet , < code > false < / code > otherwise . * @ since 1.4 */ public static boolean isBase64 ( byte octet ) { } }
return octet == PAD || ( octet >= 0 && octet < DECODE_TABLE . length && DECODE_TABLE [ octet ] != - 1 ) ;
public class MutableInodeFile {
    /**
     * Updates this inode file's state from the given entry. Only fields present
     * in the entry are applied; absent fields leave the current state untouched.
     *
     * @param entry the entry
     */
    public void updateFromEntry(UpdateInodeFileEntry entry) {
        if (entry.hasPersistJobId()) {
            setPersistJobId(entry.getPersistJobId());
        }
        if (entry.hasReplicationMax()) {
            setReplicationMax(entry.getReplicationMax());
        }
        if (entry.hasReplicationMin()) {
            setReplicationMin(entry.getReplicationMin());
        }
        if (entry.hasTempUfsPath()) {
            setTempUfsPath(entry.getTempUfsPath());
        }
        if (entry.hasBlockSizeBytes()) {
            setBlockSizeBytes(entry.getBlockSizeBytes());
        }
        if (entry.hasCacheable()) {
            setCacheable(entry.getCacheable());
        }
        if (entry.hasCompleted()) {
            setCompleted(entry.getCompleted());
        }
        if (entry.hasLength()) {
            setLength(entry.getLength());
        }
        // Block list is only replaced when the entry actually carries blocks.
        if (entry.getSetBlocksCount() > 0) {
            setBlockIds(entry.getSetBlocksList());
        }
    }
}
public class TerminateInstancesRequest {
    /**
     * This method is intended for internal use only. Returns the marshaled request configured with
     * the additional {@code DryRun=true} parameter to enable operation dry-run.
     */
    @Override
    public Request<TerminateInstancesRequest> getDryRunRequest() {
        Request<TerminateInstancesRequest> request = new TerminateInstancesRequestMarshaller().marshall(this);
        request.addParameter("DryRun", Boolean.toString(true));
        return request;
    }
}
public class SaveOptions {
    /**
     * Transparently handles the deprecated options that could be passed as
     * map-entries to {@link org.eclipse.emf.ecore.resource.Resource#save(Map)}
     * and converts them to semantically equal {@link SaveOptions}.
     * Precedence: an explicit SaveOptions entry wins, then the deprecated
     * serialization options, then the FORMAT flag, then the defaults.
     *
     * @param saveOptions the options-map or <code>null</code> if none.
     * @return the options to use. Will never return <code>null</code>.
     */
    @SuppressWarnings("deprecation")
    public static SaveOptions getOptions(Map<?, ?> saveOptions) {
        if (saveOptions == null || saveOptions.isEmpty())
            return defaultOptions();
        if (saveOptions.containsKey(KEY))
            return (SaveOptions) saveOptions.get(KEY);
        // Legacy serializer options are converted to their SaveOptions equivalent.
        if (saveOptions.containsKey(XtextResource.OPTION_SERIALIZATION_OPTIONS))
            return ((org.eclipse.xtext.parsetree.reconstr.SerializerOptions) saveOptions.get(XtextResource.OPTION_SERIALIZATION_OPTIONS)).toSaveOptions();
        if (Boolean.TRUE.equals(saveOptions.get(XtextResource.OPTION_FORMAT))) {
            return newBuilder().format().getOptions();
        }
        return defaultOptions();
    }
}
public class TileMatrixDao {
    /**
     * {@inheritDoc}
     *
     * Re-reads the row by its current id, assigns the new key, and persists the
     * change. Returns 0 if the row does not exist or the new id is null.
     */
    @Override
    public int updateId(TileMatrix data, TileMatrixKey newId) throws SQLException {
        int count = 0;
        TileMatrix readData = queryForId(data.getId());
        if (readData != null && newId != null) {
            readData.setId(newId);
            count = update(readData);
        }
        return count;
    }
}
public class SQLiteDatabase {
    /**
     * Open the database according to the flags {@link #OPEN_READWRITE}
     * {@link #OPEN_READONLY} {@link #CREATE_IF_NECESSARY}.
     * <p>Accepts input param: a concrete instance of {@link DatabaseErrorHandler} to be
     * used to handle corruption when sqlite reports database corruption.</p>
     *
     * @param path                  to database file to open and/or create
     * @param factory               an optional factory class that is called to instantiate a
     *                              cursor when query is called, or null for default
     * @param flags                 to control database access mode
     * @param walConnectionPoolSize maximum connection pool size
     * @param errorHandler          the {@link DatabaseErrorHandler} obj to be used to handle corruption
     *                              when sqlite reports database corruption
     * @param connectionListener    listener notified of connection lifecycle events
     * @return the newly opened database
     * @throws com.couchbase.lite.internal.database.sqlite.exception.SQLiteException if the database cannot be opened
     */
    public static SQLiteDatabase openDatabase(String path, CursorFactory factory, int flags, int walConnectionPoolSize,
            DatabaseErrorHandler errorHandler, com.couchbase.lite.internal.database.sqlite.SQLiteConnectionListener connectionListener) {
        SQLiteDatabase db = new SQLiteDatabase(path, flags, walConnectionPoolSize, factory, errorHandler, connectionListener);
        // open() performs the actual connection; constructor only captures configuration.
        db.open();
        return db;
    }
}
public class BigDecimal {
    /**
     * Returns a {@code BigDecimal} created from {@code BigInteger} value with
     * given scale rounded according to the MathContext settings.
     * Switches from BigInteger to compact (long) arithmetic as soon as the
     * intermediate value fits in a long.
     */
    private static BigDecimal doRound(BigInteger intVal, int scale, MathContext mc) {
        int mcp = mc.precision;
        int prec = 0;
        if (mcp > 0) {
            long compactVal = compactValFor(intVal);
            int mode = mc.roundingMode.oldMode;
            int drop;
            if (compactVal == INFLATED) {
                // Too big for a long: round in BigInteger space, digit group by digit group.
                prec = bigDigitLength(intVal);
                drop = prec - mcp;
                while (drop > 0) {
                    scale = checkScaleNonZero((long) scale - drop);
                    intVal = divideAndRoundByTenPow(intVal, drop, mode);
                    compactVal = compactValFor(intVal);
                    if (compactVal != INFLATED) {
                        // Value now fits in a long; fall through to the compact path.
                        break;
                    }
                    prec = bigDigitLength(intVal);
                    drop = prec - mcp;
                }
            }
            if (compactVal != INFLATED) {
                prec = longDigitLength(compactVal);
                drop = prec - mcp; // drop can't be more than 18
                while (drop > 0) {
                    scale = checkScaleNonZero((long) scale - drop);
                    compactVal = divideAndRound(compactVal, LONG_TEN_POWERS_TABLE[drop], mc.roundingMode.oldMode);
                    prec = longDigitLength(compactVal);
                    drop = prec - mcp;
                }
                return valueOf(compactVal, scale, prec);
            }
        }
        // No precision constraint, or the value never became compact.
        return new BigDecimal(intVal, INFLATED, scale, prec);
    }
}
public class ListInstancesRequestMarshaller {
    /**
     * Marshall the given parameter object into the protocol representation.
     *
     * @param listInstancesRequest the request to marshal; must not be null
     * @param protocolMarshaller   the marshaller receiving each bound field
     * @throws SdkClientException if the request is null or marshalling fails
     */
    public void marshall(ListInstancesRequest listInstancesRequest, ProtocolMarshaller protocolMarshaller) {
        if (listInstancesRequest == null) {
            throw new SdkClientException("Invalid argument passed to marshall(...)");
        }
        try {
            protocolMarshaller.marshall(listInstancesRequest.getServiceId(), SERVICEID_BINDING);
            protocolMarshaller.marshall(listInstancesRequest.getNextToken(), NEXTTOKEN_BINDING);
            protocolMarshaller.marshall(listInstancesRequest.getMaxResults(), MAXRESULTS_BINDING);
        } catch (Exception e) {
            throw new SdkClientException("Unable to marshall request to JSON: " + e.getMessage(), e);
        }
    }
}
public class WebUtil { /** * get user agent */ public static UserAgent getUserAgent ( HttpServletRequest request ) { } }
if ( request == null ) { return null ; } String userAgentHead = request . getHeader ( "User-Agent" ) ; return getUserAgent ( userAgentHead ) ;
public class Texture2dProgram {
    /**
     * Sets the size of the texture. This is used to find adjacent texels when filtering.
     * Precomputes the 3x3 neighborhood of texel offsets in normalized texture coordinates.
     */
    public void setTexSize(int width, int height) {
        mTexHeight = height;
        mTexWidth = width;
        // One texel step in each dimension, in [0,1] texture space.
        float rw = 1.0f / width;
        float rh = 1.0f / height;
        // Don't need to create a new array here, but it's syntactically convenient.
        // Offsets are row-major: top row, center row, bottom row of the 3x3 neighborhood.
        mTexOffset = new float[] { -rw, -rh, 0f, -rh, rw, -rh, -rw, 0f, 0f, 0f, rw, 0f, -rw, rh, 0f, rh, rw, rh };
        // Log.d(TAG, "filt size: " + width + "x" + height + ": " + Arrays.toString(mTexOffset));
    }
}
public class Ifc4PackageImpl {
    /**
     * Lazily resolves and caches the EClass for IfcPressureMeasure from the
     * registered Ifc4 package.
     * <!-- begin-user-doc -->
     * <!-- end-user-doc -->
     *
     * @generated
     */
    @Override
    public EClass getIfcPressureMeasure() {
        if (ifcPressureMeasureEClass == null) {
            // Index 854 is the generated classifier position of IfcPressureMeasure.
            ifcPressureMeasureEClass = (EClass) EPackage.Registry.INSTANCE.getEPackage(Ifc4Package.eNS_URI).getEClassifiers().get(854);
        }
        return ifcPressureMeasureEClass;
    }
}
public class CqlBlockedDataReaderDAO {
    /**
     * Creates a Record instance for a given key and list of rows. All rows must be from the same Cassandra row;
     * in other words, it is expected that row.getBytesUnsafe(ROW_KEY_RESULT_SET_COLUMN) returns the same value for
     * each row in rows.
     * Three independent iterators are created over the same rows: one for changes,
     * one for compactions, and one for raw metadata.
     */
    private Record newRecordFromCql(Key key, Iterable<Row> rows, Placement placement, String rowKey) {
        Session session = placement.getKeyspace().getCqlSession();
        // Protocol version and codec registry are needed to deserialize the delta columns.
        ProtocolVersion protocolVersion = session.getCluster().getConfiguration().getProtocolOptions().getProtocolVersion();
        CodecRegistry codecRegistry = session.getCluster().getConfiguration().getCodecRegistry();
        Iterator<Map.Entry<DeltaClusteringKey, Change>> changeIter = decodeChangesFromCql(new CqlDeltaIterator(rows.iterator(), BLOCK_RESULT_SET_COLUMN,
                CHANGE_ID_RESULT_SET_COLUMN, VALUE_RESULT_SET_COLUMN, false, _deltaPrefixLength, protocolVersion, codecRegistry, rowKey));
        Iterator<Map.Entry<DeltaClusteringKey, Compaction>> compactionIter = decodeCompactionsFromCql(new CqlDeltaIterator(rows.iterator(),
                BLOCK_RESULT_SET_COLUMN, CHANGE_ID_RESULT_SET_COLUMN, VALUE_RESULT_SET_COLUMN, false, _deltaPrefixLength, protocolVersion, codecRegistry,
                rowKey));
        Iterator<RecordEntryRawMetadata> rawMetadataIter = rawMetadataFromCql(new CqlDeltaIterator(rows.iterator(), BLOCK_RESULT_SET_COLUMN,
                CHANGE_ID_RESULT_SET_COLUMN, VALUE_RESULT_SET_COLUMN, false, _deltaPrefixLength, protocolVersion, codecRegistry, rowKey));
        return new RecordImpl(key, compactionIter, changeIter, rawMetadataIter);
    }
}
public class GenListModuleReader { /** * List of files with " @ processing - role = resource - only " . * @ return the resource - only set */ public Set < URI > getResourceOnlySet ( ) { } }
final Set < URI > res = new HashSet < > ( resourceOnlySet ) ; res . removeAll ( normalProcessingRoleSet ) ; return res ;
public class Bytes { /** * Returns an array of byte arrays made from passed array of Text . * @ param t operands * @ return Array of byte arrays made from passed array of Text */ public static byte [ ] [ ] toByteArrays ( final String [ ] t ) { } }
byte [ ] [ ] result = new byte [ t . length ] [ ] ; for ( int i = 0 ; i < t . length ; i ++ ) { result [ i ] = Bytes . toBytes ( t [ i ] ) ; } return result ;
public class Item {
    /**
     * Sets the value of the specified attribute in the current item to the
     * given numeric value.
     *
     * @param attrName the attribute name; validated before insertion
     * @param val      the numeric value, stored as a BigDecimal
     * @return this item, for call chaining
     */
    public Item withNumber(String attrName, Number val) {
        checkInvalidAttribute(attrName, val);
        // Numbers are normalized to BigDecimal for DynamoDB's number representation.
        attributes.put(attrName, toBigDecimal(val));
        return this;
    }
}
public class Comment {
    /**
     * {@inheritDoc}
     *
     * Writes the comment as an HTML comment on its own line, inserting a leading
     * newline first when the writer is not already at the start of a line.
     */
    @Override
    public boolean write(Writer out, boolean atNewline) throws IOException {
        if (!atNewline)
            out.write(DocletConstants.NL);
        out.write("<!-- ");
        out.write(commentText);
        out.write(" -->" + DocletConstants.NL);
        // Always ends with a newline, so the caller is at a fresh line.
        return true;
    }
}
public class BindTransformer { /** * Get Java primitive type Transformable . * @ param type * the type * @ return the primitive transform */ static BindTransform getPrimitiveTransform ( TypeName type ) { } }
if ( Integer . TYPE . toString ( ) . equals ( type . toString ( ) ) ) { return new IntegerBindTransform ( false ) ; } if ( Boolean . TYPE . toString ( ) . equals ( type . toString ( ) ) ) { return new BooleanBindTransform ( false ) ; } if ( Long . TYPE . toString ( ) . equals ( type . toString ( ) ) ) { return new LongBindTransform ( false ) ; } if ( Double . TYPE . toString ( ) . equals ( type . toString ( ) ) ) { return new DoubleBindTransform ( false ) ; } if ( Float . TYPE . toString ( ) . equals ( type . toString ( ) ) ) { return new FloatBindTransform ( false ) ; } if ( Short . TYPE . toString ( ) . equals ( type . toString ( ) ) ) { return new ShortBindTransform ( false ) ; } if ( Byte . TYPE . toString ( ) . equals ( type . toString ( ) ) ) { return new ByteBindTransform ( false ) ; } if ( Character . TYPE . toString ( ) . equals ( type . toString ( ) ) ) { return new CharacterBindTransform ( false ) ; } return null ;
public class Trash {
    /**
     * For each existing trash snapshot, uses a {@link org.apache.gobblin.data.management.trash.SnapshotCleanupPolicy}
     * to determine whether the snapshot should be deleted. If so, delete it permanently.
     * Each existing snapshot will be passed to
     * {@link org.apache.gobblin.data.management.trash.SnapshotCleanupPolicy#shouldDeleteSnapshot}
     * from oldest to newest, and will be deleted if the method returns true.
     *
     * @throws IOException if the trash location cannot be listed
     */
    public void purgeTrashSnapshots() throws IOException {
        List<FileStatus> snapshotsInTrash = Arrays.asList(this.fs.listStatus(this.trashLocation, TRASH_SNAPSHOT_PATH_FILTER));
        // Sort oldest-first by the timestamp encoded in each snapshot directory name.
        Collections.sort(snapshotsInTrash, new Comparator<FileStatus>() {
            @Override
            public int compare(FileStatus o1, FileStatus o2) {
                return TRASH_SNAPSHOT_NAME_FORMATTER.parseDateTime(o1.getPath().getName())
                        .compareTo(TRASH_SNAPSHOT_NAME_FORMATTER.parseDateTime(o2.getPath().getName()));
            }
        });
        int totalSnapshots = snapshotsInTrash.size();
        int snapshotsDeleted = 0;
        for (FileStatus snapshot : snapshotsInTrash) {
            if (this.snapshotCleanupPolicy.shouldDeleteSnapshot(snapshot, this)) {
                try {
                    // Recursive delete; a failure on one snapshot does not stop the purge.
                    boolean successfullyDeleted = this.fs.delete(snapshot.getPath(), true);
                    if (successfullyDeleted) {
                        snapshotsDeleted++;
                    } else {
                        LOG.error("Failed to delete snapshot " + snapshot.getPath());
                    }
                } catch (IOException exception) {
                    LOG.error("Failed to delete snapshot " + snapshot.getPath(), exception);
                }
            }
        }
        LOG.info(String.format("Deleted %d out of %d existing snapshots.", snapshotsDeleted, totalSnapshots));
    }
}
public class AgentServlet {
    /**
     * Update the agent URL and secured flag in the agent details if not already done.
     * Uses a check-then-lock-then-recheck pattern so initialization happens at most once.
     *
     * @param pReq the incoming request used to derive the agent's own URL and auth type
     */
    private void updateAgentDetailsIfNeeded(HttpServletRequest pReq) {
        // Lookup the Agent URL if needed
        AgentDetails details = backendManager.getAgentDetails();
        if (details.isInitRequired()) {
            synchronized (details) {
                // Re-check under the lock: another thread may have initialized meanwhile.
                if (details.isInitRequired()) {
                    if (details.isUrlMissing()) {
                        String url = getBaseUrl(NetworkUtil.sanitizeLocalUrl(pReq.getRequestURL().toString()), extractServletPath(pReq));
                        details.setUrl(url);
                    }
                    if (details.isSecuredMissing()) {
                        // A non-null auth type means the container authenticated the request.
                        details.setSecured(pReq.getAuthType() != null);
                    }
                    // Seal so no further initialization is attempted.
                    details.seal();
                }
            }
        }
    }
}
public class DeLiCluTree {
    /**
     * Creates a new directory entry representing the specified node, capturing its
     * page id, bounding box, and handled/unhandled flags.
     *
     * @param node the node to be represented by the new entry
     */
    @Override
    protected DeLiCluEntry createNewDirectoryEntry(DeLiCluNode node) {
        return new DeLiCluDirectoryEntry(node.getPageID(), node.computeMBR(), node.hasHandled(), node.hasUnhandled());
    }
}
public class Grid {
    /**
     * Replies the element at the specified index, counting elements across all
     * grid cells in iteration order.
     *
     * @param index the index.
     * @return the element at the specified position.
     * @throws IndexOutOfBoundsException if the index is negative or beyond the last element
     */
    @Pure
    public P getElementAt(int index) {
        if (index >= 0) {
            // idx is the running count of elements in the cells already skipped.
            int idx = 0;
            int eIdx;
            for (final GridCell<P> cell : getGridCells()) {
                eIdx = idx + cell.getReferenceElementCount();
                if (index < eIdx) {
                    try {
                        // The target lives in this cell, at a cell-local offset.
                        return cell.getElementAt(index - idx);
                    } catch (IndexOutOfBoundsException exception) {
                        // Re-throw with the global index for a clearer message.
                        throw new IndexOutOfBoundsException(Integer.toString(index));
                    }
                }
                idx = eIdx;
            }
        }
        throw new IndexOutOfBoundsException(Integer.toString(index));
    }
}
public class PersistInterfaceService { /** * Runs all registered post - commit hooks */ private void runEKBPostCommitHooks ( EKBCommit commit ) throws EKBException { } }
for ( EKBPostCommitHook hook : postCommitHooks ) { try { hook . onPostCommit ( commit ) ; } catch ( Exception e ) { LOGGER . warn ( "An exception is thrown in a EKB post commit hook." , e ) ; } }
public class RoleAssignmentsInner {
    /**
     * Gets role assignments for a scope, unwrapping each service response to its page body.
     *
     * @param scope the scope of the role assignments
     * @throws IllegalArgumentException thrown if parameters fail the validation
     * @return the observable to the PagedList&lt;RoleAssignmentInner&gt; object
     */
    public Observable<Page<RoleAssignmentInner>> listForScopeAsync(final String scope) {
        return listForScopeWithServiceResponseAsync(scope).map(new Func1<ServiceResponse<Page<RoleAssignmentInner>>, Page<RoleAssignmentInner>>() {
            @Override
            public Page<RoleAssignmentInner> call(ServiceResponse<Page<RoleAssignmentInner>> response) {
                return response.body();
            }
        });
    }
}
public class SynchronizationPoint {
    /**
     * Report this synchronization point as failed because of the given exception.
     * The {@code failureException} must be set. Waiters blocked on the condition
     * are woken so they can observe the failure state.
     *
     * @param failureException the exception causing this synchronization point to fail.
     */
    public void reportFailure(E failureException) {
        assert failureException != null;
        connectionLock.lock();
        try {
            // State and cause must be set before signalling, under the same lock.
            state = State.Failure;
            this.failureException = failureException;
            condition.signalAll();
        } finally {
            connectionLock.unlock();
        }
    }
}
public class DefaultTokenMap {
    /**
     * Called when only the schema has changed. Rebuilds keyspace-level token data,
     * reusing cached per-replication-config maps where the config is unchanged, and
     * returns {@code this} unchanged when no replication config differs.
     */
    public DefaultTokenMap refresh(@NonNull Collection<Node> nodes, @NonNull Collection<KeyspaceMetadata> keyspaces,
            @NonNull ReplicationStrategyFactory replicationStrategyFactory) {
        Map<CqlIdentifier, Map<String, String>> newReplicationConfigs = buildReplicationConfigs(keyspaces, logPrefix);
        if (newReplicationConfigs.equals(replicationConfigs)) {
            LOG.debug("[{}] Schema changes do not impact the token map, no refresh needed", logPrefix);
            return this;
        }
        ImmutableMap.Builder<Map<String, String>, KeyspaceTokenMap> newKeyspaceMapsBuilder = ImmutableMap.builder();
        // Will only be built if needed:
        Map<Token, Node> tokenToPrimary = null;
        List<Token> ring = null;
        // Keyspace maps are keyed by replication config, so identical configs share one map.
        for (Map<String, String> config : ImmutableSet.copyOf(newReplicationConfigs.values())) {
            KeyspaceTokenMap oldKeyspaceMap = keyspaceMaps.get(config);
            if (oldKeyspaceMap != null) {
                LOG.debug("[{}] Reusing existing keyspace-level data for {}", logPrefix, config);
                newKeyspaceMapsBuilder.put(config, oldKeyspaceMap);
            } else {
                LOG.debug("[{}] Computing new keyspace-level data for {}", logPrefix, config);
                if (tokenToPrimary == null) {
                    // Lazily compute the ring data once, shared by all new keyspace maps.
                    TokenToPrimaryAndRing tmp = buildTokenToPrimaryAndRing(nodes, tokenFactory);
                    tokenToPrimary = tmp.tokenToPrimary;
                    ring = tmp.ring;
                }
                newKeyspaceMapsBuilder.put(config,
                        KeyspaceTokenMap.build(config, tokenToPrimary, ring, tokenRanges, tokenFactory, replicationStrategyFactory, logPrefix));
            }
        }
        return new DefaultTokenMap(tokenFactory, tokenRanges, tokenRangesByPrimary, newReplicationConfigs, newKeyspaceMapsBuilder.build(), logPrefix);
    }
}
public class FileIngester {
    /**
     * Imports a CSV file defined in the fileIngest entity.
     *
     * <p>Downloads the file at {@code url}, imports it through the CSV import service
     * (upserting metadata and adding/updating existing entities), records the resulting
     * file on the job execution, and persists the file metadata.
     *
     * @param entityTypeId target entity type; also used to name the downloaded file
     * @param url location of the CSV file to download
     * @param loader loader identifier; only "CSV" is supported
     * @param jobExecutionID identifier of the running job execution
     * @param progress progress reporter (max 2 steps: download, import)
     * @return the persisted metadata of the downloaded file
     * @throws FileIngestException if {@code loader} is not "CSV"
     * @see FileIngestJobExecutionMetadata
     */
    public FileMeta ingest(String entityTypeId, String url, String loader, String jobExecutionID, Progress progress) {
        if (!"CSV".equals(loader)) {
            throw new FileIngestException("Unknown loader '" + loader + "'");
        }
        progress.setProgressMax(2);
        progress.progress(0, "Downloading url '" + url + "'");
        File file = fileStoreDownload.downloadFile(url, jobExecutionID, entityTypeId + ".csv");
        progress.progress(1, "Importing...");
        FileRepositoryCollection repoCollection =
                fileRepositoryCollectionFactory.createFileRepositoryCollection(file);
        ImportService importService = importServiceFactory.getImportService(file, repoCollection);
        // UPSERT metadata; ADD_UPDATE_EXISTING for the entity data itself.
        EntityImportReport report =
                importService.doImport(repoCollection, MetadataAction.UPSERT, ADD_UPDATE_EXISTING, null);
        progress.status("Ingestion of url '" + url + "' done.");
        // The report may not contain an entry for this entity type; treat that as zero imports.
        Integer count = report.getNrImportedEntitiesMap().get(entityTypeId);
        count = count != null ? count : 0;
        progress.progress(2, "Successfully imported " + count + " " + entityTypeId + " entities.");
        FileMeta fileMeta = createFileMeta(jobExecutionID, file);
        // Attach the ingested file to the job execution so it can be retrieved later.
        FileIngestJobExecution fileIngestJobExecution = (FileIngestJobExecution) progress.getJobExecution();
        fileIngestJobExecution.setFile(fileMeta);
        dataService.add(FILE_META, fileMeta);
        return fileMeta;
    }
}
public class TreeAppendable {
    /**
     * Computes trace location data for the given object.
     *
     * <p>Uses the extended location provider when available to honor the region
     * {@code query}; falls back to the full text region otherwise. Regions without
     * line information are wrapped with zero line numbers (and a debug stack trace
     * is logged to help find the misbehaving provider).
     *
     * @param skipEmpty when true, an empty region yields {@code null}; when false,
     *        an empty region is still converted (used for root regions that must
     *        carry path information even with an empty offset/length pair)
     * @return the location data, or {@code null} if no region could be determined
     *         (method is {@code @Nullable})
     */
    private static ILocationData createLocationData(ITraceURIConverter converter,
            ILocationInFileProvider locationProvider, EObject object,
            ILocationInFileProviderExtension.RegionDescription query, boolean skipEmpty) {
        ITextRegion textRegion =
                locationProvider instanceof ILocationInFileProviderExtension
                        ? ((ILocationInFileProviderExtension) locationProvider).getTextRegion(object, query)
                        : locationProvider.getFullTextRegion(object);
        if (!(textRegion instanceof ITextRegionWithLineInformation)) {
            // The Exception is created solely to capture a stack trace in the debug log.
            if (log.isDebugEnabled())
                log.debug("location provider returned text region without line information.", new Exception());
            if (textRegion != null)
                textRegion = new TextRegionWithLineInformation(textRegion.getOffset(), textRegion.getLength(), 0, 0);
            else
                return null;
        }
        // usually we want to skip empty regions but if the root region is empty, we want to use it
        // to store the path information along with the empty offset/length pair
        if (skipEmpty && textRegion == ITextRegion.EMPTY_REGION) {
            return null;
        }
        ILocationData newData =
                createLocationData(converter, object, (ITextRegionWithLineInformation) textRegion);
        return newData;
    }
}
public class GUID { /** * ~ Methods / / / / / */ public static String generateFormattedGUID ( ) { } }
// xxxxx - xxxx - xxxx - xxxx - xxxxx String guid = generateGUID ( ) ; return guid . substring ( 0 , 8 ) + '-' + guid . substring ( 8 , 12 ) + '-' + guid . substring ( 12 , 16 ) + '-' + guid . substring ( 16 , 20 ) + '-' + guid . substring ( 20 ) ;
public class EventRef { /** * Helper method that computes the event descriptor sting for a method */ private String computeEventDescriptor ( Method method ) { } }
StringBuilder sb = new StringBuilder ( ) ; // Add event class and method name sb . append ( method . getDeclaringClass ( ) . getName ( ) ) ; sb . append ( "." ) ; sb . append ( method . getName ( ) ) ; // Add event arguments Class [ ] parms = method . getParameterTypes ( ) ; sb . append ( "(" ) ; for ( int i = 0 ; i < parms . length ; i ++ ) appendTypeDescriptor ( sb , parms [ i ] ) ; sb . append ( ")" ) ; // Add event return type appendTypeDescriptor ( sb , method . getReturnType ( ) ) ; return sb . toString ( ) ;
public class GitLabApiClient { /** * Construct a REST URL with the specified path arguments . * @ param pathArgs variable list of arguments used to build the URI * @ return a REST URL with the specified path arguments * @ throws IOException if an error occurs while constructing the URL */ protected URL getApiUrl ( Object ... pathArgs ) throws IOException { } }
String url = appendPathArgs ( this . hostUrl , pathArgs ) ; return ( new URL ( url ) ) ;
public class H2O { /** * Tell the embedding software that this H2O instance belongs to * a cloud of a certain size . * This may be non - blocking . * @ param ip IP address this H2O can be reached at . * @ param port Port this H2O can be reached at ( for REST API and browser ) . * @ param size Number of H2O instances in the cloud . */ public static void notifyAboutCloudSize ( InetAddress ip , int port , InetAddress leaderIp , int leaderPort , int size ) { } }
if ( ARGS . notify_local != null && ! ARGS . notify_local . trim ( ) . isEmpty ( ) ) { final File notifyFile = new File ( ARGS . notify_local ) ; final File parentDir = notifyFile . getParentFile ( ) ; if ( parentDir != null && ! parentDir . isDirectory ( ) ) { if ( ! parentDir . mkdirs ( ) ) { Log . err ( "Cannot make parent dir for notify file." ) ; H2O . exit ( - 1 ) ; } } try ( BufferedWriter output = new BufferedWriter ( new FileWriter ( notifyFile ) ) ) { output . write ( SELF_ADDRESS . getHostAddress ( ) ) ; output . write ( ':' ) ; output . write ( Integer . toString ( API_PORT ) ) ; output . flush ( ) ; } catch ( IOException e ) { e . printStackTrace ( ) ; } } if ( embeddedH2OConfig == null ) { return ; } embeddedH2OConfig . notifyAboutCloudSize ( ip , port , leaderIp , leaderPort , size ) ;
public class Fat16BootSector { /** * Sets the number of sectors / fat * @ param v the new number of sectors per fat */ @ Override public void setSectorsPerFat ( long v ) { } }
if ( v == getSectorsPerFat ( ) ) return ; if ( v > 0x7FFF ) throw new IllegalArgumentException ( "too many sectors for a FAT12/16" ) ; set16 ( SECTORS_PER_FAT_OFFSET , ( int ) v ) ;
public class FirewallClient { /** * Creates a firewall rule in the specified project using the data included in the request . * < p > Sample code : * < pre > < code > * try ( FirewallClient firewallClient = FirewallClient . create ( ) ) { * ProjectName project = ProjectName . of ( " [ PROJECT ] " ) ; * Firewall firewallResource = Firewall . newBuilder ( ) . build ( ) ; * Operation response = firewallClient . insertFirewall ( project , firewallResource ) ; * < / code > < / pre > * @ param project Project ID for this request . * @ param firewallResource Represents a Firewall resource . * @ throws com . google . api . gax . rpc . ApiException if the remote call fails */ @ BetaApi public final Operation insertFirewall ( ProjectName project , Firewall firewallResource ) { } }
InsertFirewallHttpRequest request = InsertFirewallHttpRequest . newBuilder ( ) . setProject ( project == null ? null : project . toString ( ) ) . setFirewallResource ( firewallResource ) . build ( ) ; return insertFirewall ( request ) ;
public class Broadcaster {
    /**
     * A .m3u8 file was written in the recording directory.
     * Called on a background thread.
     *
     * <p>Snapshots the manifest under a per-segment name, queues the copy for
     * upload as {@code index.m3u8}, and appends the newest entry to the event
     * manifest. If recording has stopped, also notifies the broadcast listener.
     *
     * @param e event carrying the manifest file that was written
     */
    @Subscribe
    public void onManifestUpdated(HlsManifestWrittenEvent e) {
        if (!isRecording()) {
            if (Kickflip.getBroadcastListener() != null) {
                if (VERBOSE) Log.i(TAG, "Sending onBroadcastStop");
                Kickflip.getBroadcastListener().onBroadcastStop();
            }
        }
        if (VERBOSE) Log.i(TAG, "onManifestUpdated. Last segment? " + !isRecording());
        // Copy m3u8 at this moment and queue it to uploading service.
        // The copy gets a unique per-segment suffix so successive snapshots don't clobber each other.
        final File copy = new File(mManifestSnapshotDir,
                e.getManifestFile().getName().replace(".m3u8", "_" + mNumSegmentsWritten + ".m3u8"));
        try {
            if (VERBOSE)
                Log.i(TAG, "Copying " + e.getManifestFile().getAbsolutePath() + " to " + copy.getAbsolutePath());
            FileUtils.copy(e.getManifestFile(), copy);
            queueOrSubmitUpload(keyForFilename("index.m3u8"), copy);
            // The second argument marks whether this is the final manifest entry.
            appendLastManifestEntryToEventManifest(copy, !isRecording());
        } catch (IOException e1) {
            // Best-effort: a failed copy means this manifest update is lost and the
            // published stream will have a discontinuity, but recording continues.
            Log.e(TAG, "Failed to copy manifest file. Upload of this manifest cannot proceed. Stream will have a discontinuity!");
            e1.printStackTrace();
        }
        mNumSegmentsWritten++;
    }
}
public class MiriamLink { /** * To know if a URI of a data type is deprecated . * @ param uri ( URN or URL ) of a data type * @ return answer ( " true " or " false " ) to the question : is this URI deprecated ? */ public static boolean isDeprecated ( String uri ) { } }
Datatype datatype = datatypesHash . get ( uri ) ; String urn = getOfficialDataTypeURI ( datatype ) ; return ! uri . equalsIgnoreCase ( urn ) ;
public class MongoDBQuery { /** * ( non - Javadoc ) * @ see com . impetus . kundera . query . QueryImpl # iterate ( ) */ @ Override public Iterator iterate ( ) { } }
EntityMetadata m = getEntityMetadata ( ) ; Client client = persistenceDelegeator . getClient ( m ) ; return new ResultIterator ( ( MongoDBClient ) client , m , createMongoQuery ( m , getKunderaQuery ( ) . getFilterClauseQueue ( ) ) , getOrderByClause ( m ) , getKeys ( m , getKunderaQuery ( ) . getResult ( ) ) , persistenceDelegeator , getFetchSize ( ) != null ? getFetchSize ( ) : this . maxResult ) ;
public class Days { /** * Obtains an instance of { @ code Days } from a temporal amount . * This obtains an instance based on the specified amount . * A { @ code TemporalAmount } represents an amount of time , which may be * date - based or time - based , which this factory extracts to a { @ code Days } . * The result is calculated by looping around each unit in the specified amount . * Each amount is converted to days using { @ link Temporals # convertAmount } . * If the conversion yields a remainder , an exception is thrown . * If the amount is zero , the unit is ignored . * @ param amount the temporal amount to convert , not null * @ return the equivalent amount , not null * @ throws DateTimeException if unable to convert to a { @ code Days } * @ throws ArithmeticException if numeric overflow occurs */ public static Days from ( TemporalAmount amount ) { } }
if ( amount instanceof Days ) { return ( Days ) amount ; } Objects . requireNonNull ( amount , "amount" ) ; int days = 0 ; for ( TemporalUnit unit : amount . getUnits ( ) ) { long value = amount . get ( unit ) ; if ( value != 0 ) { long [ ] converted = Temporals . convertAmount ( value , unit , DAYS ) ; if ( converted [ 1 ] != 0 ) { throw new DateTimeException ( "Amount could not be converted to a whole number of days: " + value + " " + unit ) ; } days = Math . addExact ( days , Math . toIntExact ( converted [ 0 ] ) ) ; } } return of ( days ) ;
public class NS { /** * Returns an < code > IfNotExistsFunction < / code > object which represents an < a href = * " http : / / docs . aws . amazon . com / amazondynamodb / latest / developerguide / Expressions . Modifying . html " * > if _ not _ exists ( path , operand ) < / a > function call where path refers to that * of the current path operand ; used for building expressions . * < pre > * " if _ not _ exists ( path , operand ) – If the item does not contain an attribute * at the specified path , then if _ not _ exists evaluates to operand ; otherwise , * it evaluates to path . You can use this function to avoid overwriting an * attribute already present in the item . " * < / pre > * @ param defaultValue * the default value that will be used as the operand to the * if _ not _ exists function call . */ public IfNotExistsFunction < NS > ifNotExists ( Number ... defaultValue ) { } }
return new IfNotExistsFunction < NS > ( this , new LiteralOperand ( new LinkedHashSet < Number > ( Arrays . asList ( defaultValue ) ) ) ) ;
public class CPDefinitionOptionRelUtil {
    /**
     * Returns a range of all the cp definition option rels where companyId = &#63;.
     *
     * <p>Useful when paginating results. Returns a maximum of <code>end - start</code>
     * instances. <code>start</code> and <code>end</code> are not primary keys, they are
     * indexes in the result set; thus <code>0</code> refers to the first result in the
     * set. Setting both to {@link QueryUtil#ALL_POS} returns the full result set, sorted
     * by the default ORDER BY logic (or primary key ascending when no pagination is used).
     *
     * @param companyId the company ID
     * @param start the lower bound of the range of cp definition option rels
     * @param end the upper bound of the range of cp definition option rels (not inclusive)
     * @return the range of matching cp definition option rels
     */
    public static List<CPDefinitionOptionRel> findByCompanyId(long companyId, int start, int end) {
        // Static facade: delegate straight to the persistence implementation.
        return getPersistence().findByCompanyId(companyId, start, end);
    }
}
public class Ftp { /** * check if a file or directory exists * @ return FTPCLient * @ throws PageException * @ throws IOException */ private AFTPClient actionExists ( ) throws PageException , IOException { } }
required ( "item" , item ) ; AFTPClient client = getClient ( ) ; FTPFile file = existsFile ( client , item , false ) ; Struct cfftp = writeCfftp ( client ) ; cfftp . setEL ( RETURN_VALUE , Caster . toBoolean ( file != null ) ) ; cfftp . setEL ( SUCCEEDED , Boolean . TRUE ) ; return client ;
public class StorageSnippets {
    /**
     * Example of deleting the ACL entry for "allAuthenticatedUsers" on a bucket.
     *
     * <p>The [START]/[END] markers delimit the region published as a
     * documentation snippet, so the code between them is kept verbatim.
     *
     * [VARIABLE "my_unique_bucket"]
     *
     * @param bucketName name of the bucket whose ACL entry is removed
     * @return {@code true} if the ACL entry was found and deleted
     */
    public boolean deleteBucketAcl(String bucketName) {
        // [START deleteBucketAcl]
        boolean deleted = storage.deleteAcl(bucketName, User.ofAllAuthenticatedUsers());
        if (deleted) {
            // the acl entry was deleted
        } else {
            // the acl entry was not found
        }
        // [END deleteBucketAcl]
        return deleted;
    }
}
public class QRDecompositionHouseholderTran_ZDRM { /** * To decompose the matrix ' A ' it must have full rank . ' A ' is a ' m ' by ' n ' matrix . * It requires about 2n * m < sup > 2 < / sup > - 2m < sup > 2 < / sup > / 3 flops . * The matrix provided here can be of different * dimension than the one specified in the constructor . It just has to be smaller than or equal * to it . */ @ Override public boolean decompose ( ZMatrixRMaj A ) { } }
setExpectedMaxSize ( A . numRows , A . numCols ) ; CommonOps_ZDRM . transpose ( A , QR ) ; error = false ; for ( int j = 0 ; j < minLength ; j ++ ) { householder ( j ) ; updateA ( j ) ; } return ! error ;
public class MemoryManager {
    /**
     * Log memory usage to the console at a regular interval.
     *
     * @param interval scheduling interval in milliseconds — passed to
     *        {@code executeEvery(interval, interval)}; presumably initial delay
     *        and repeat period (NOTE(review): confirm against the Task API)
     * @param level log level used for each memory report
     */
    public static void logMemory(long interval, Level level) {
        // Background CPU task that delegates to the single-shot logMemory(level) overload.
        Task<Void, NoException> task = new Task.Cpu<Void, NoException>("Logging memory", Task.PRIORITY_BACKGROUND) {
            @Override
            public Void run() {
                logMemory(level);
                return null;
            }
        };
        task.executeEvery(interval, interval);
        task.start();
    }
}
public class MemberLiteral { /** * Returns { @ code true } if this member is declared as default access . * @ return { @ code true } if default access */ public boolean isDefaultAccess ( ) { } }
return ! Modifier . isPrivate ( getModifiers ( ) ) && ! Modifier . isProtected ( getModifiers ( ) ) && ! Modifier . isPublic ( getModifiers ( ) ) ;
public class UIData { /** * Set the maximum number of rows displayed in the table . */ public void setRows ( int rows ) { } }
if ( rows < 0 ) { throw new IllegalArgumentException ( "rows: " + rows ) ; } getStateHelper ( ) . put ( PropertyKeys . rows , rows ) ;
public class Linear { /** * Returns the vector which was assigned the given internal id or null if the internal id does not exist . * The vector is taken either from the ram - based ( if loadIndexInMemory is true ) or from the disk - based * index . * @ param iid * The internal id of the vector * @ return The vector with the given internal id or null if the internal id does not exist */ public double [ ] getVector ( int iid ) { } }
if ( iid < 0 || iid > loadCounter ) { System . out . println ( "Internal id " + iid + " is out of range!" ) ; return null ; } double [ ] vector = new double [ vectorLength ] ; if ( loadIndexInMemory ) { for ( int i = 0 ; i < vectorLength ; i ++ ) { vector [ i ] = vectorsList . getQuick ( iid * vectorLength + i ) ; } } else { // get the vector from the BDB structure DatabaseEntry key = new DatabaseEntry ( ) ; IntegerBinding . intToEntry ( iid , key ) ; DatabaseEntry foundData = new DatabaseEntry ( ) ; if ( iidToVectorDB . get ( null , key , foundData , null ) == OperationStatus . SUCCESS ) { TupleInput input = TupleBinding . entryToInput ( foundData ) ; for ( int i = 0 ; i < vectorLength ; i ++ ) { vector [ i ] = input . readDouble ( ) ; } } else { System . out . println ( "Internal id " + iid + " is in range but vector was not found.." ) ; System . out . println ( "Index is probably corrupted" ) ; System . exit ( 0 ) ; return null ; } } return vector ;
public class Filelistener { /** * In this method a subdirectory is being added to the system and watched . * This is necessary since the { @ link WatchService } doesn ' t support watching * a folder with higher depths than 1. * @ param root * @ param filePath * @ throws IOException */ private void addSubDirectory ( Path root , Path filePath ) throws IOException { } }
String listener = getListenerRootPath ( root ) ; List < String > listeners = mSubDirectories . get ( listener ) ; if ( listeners != null ) { if ( mSubDirectories . get ( listener ) . contains ( filePath . toAbsolutePath ( ) ) ) { return ; } else { mSubDirectories . get ( listener ) . add ( filePath . toString ( ) ) ; } try { watchDir ( filePath . toFile ( ) ) ; } catch ( IOException e ) { throw new IOException ( "Could not watch the subdirectories." , e ) ; } }
public class SiteTaskerQueue {
    /**
     * Block on the site tasker queue until a task is available.
     *
     * <p>Tracks starvation time (the span spent waiting on an empty queue) and
     * updates the queue-depth tracker with each taken task's offer time.
     *
     * @return the next task, never null
     * @throws InterruptedException if interrupted while waiting
     */
    public SiteTasker take() throws InterruptedException {
        // Fast path: a non-blocking poll avoids the starvation bookkeeping entirely.
        SiteTasker task = m_tasks.poll();
        if (task == null) {
            // Queue is empty: mark the start of a starvation period before blocking.
            m_starvationTracker.beginStarvation();
        } else {
            m_queueDepthTracker.pollUpdate(task.getQueueOfferTime());
            return task;
        }
        try {
            // Spin/block until a task arrives.
            task = CoreUtils.queueSpinTake(m_tasks);
            // task is never null
            m_queueDepthTracker.pollUpdate(task.getQueueOfferTime());
            return task;
        } finally {
            // End the starvation period whether we got a task or were interrupted.
            m_starvationTracker.endStarvation();
        }
    }
}