idx
int64
0
165k
question
stringlengths
73
4.15k
target
stringlengths
5
918
len_question
int64
21
890
len_target
int64
3
255
154,000
/**
 * Updates the binding state (variableId/layoutRes) for the given item and position via the
 * onItemBind callback. Called internally by the binding collection adapters.
 *
 * @throws IllegalStateException if the callback leaves variableId or layoutRes unset
 */
public void onItemBind(int position, T item) {
    if (onItemBind == null) {
        return;
    }
    variableId = VAR_INVALID;
    layoutRes = LAYOUT_NONE;
    onItemBind.onItemBind(this, position, item);
    if (variableId == VAR_INVALID) {
        throw new IllegalStateException("variableId not set in onItemBind()");
    }
    if (layoutRes == LAYOUT_NONE) {
        throw new IllegalStateException("layoutRes not set in onItemBind()");
    }
}
Updates the state of the binding for the given item and position . This is called internally by the binding collection adapters .
125
24
154,001
/**
 * Inserts a single item (as a singleton list) at the end of the merge list and notifies
 * listeners of the insertion.
 *
 * @return this list, for chaining
 */
public MergeObservableList<T> insertItem(T object) {
    List<T> singleton = Collections.singletonList(object);
    lists.add(singleton);
    modCount += 1;
    listeners.notifyInserted(this, size() - 1, 1);
    return this;
}
Inserts the given item into the merge list .
56
10
154,002
/**
 * Removes the first matching single (non-observable) item from the merge list and notifies
 * listeners with its flattened position.
 *
 * @return true if an item was found and removed
 */
public boolean removeItem(T object) {
    int offset = 0;
    for (int i = 0, n = lists.size(); i < n; i++) {
        List<? extends T> list = lists.get(i);
        if (!(list instanceof ObservableList)) {
            // Non-observable entries hold exactly one item (see insertItem()).
            Object candidate = list.get(0);
            boolean matches = (object == null) ? (candidate == null) : object.equals(candidate);
            if (matches) {
                lists.remove(i);
                modCount += 1;
                listeners.notifyRemoved(this, offset, 1);
                return true;
            }
        }
        offset += list.size();
    }
    return false;
}
Removes the given item from the merge list .
141
10
154,003
/**
 * Removes every item and backing list from the merge list, detaching the change callback
 * from each observable list, then notifies listeners of one bulk removal.
 */
public void removeAll() {
    int removedCount = size();
    if (removedCount == 0) {
        return;
    }
    for (List<? extends T> list : lists) {
        if (list instanceof ObservableList) {
            ((ObservableList) list).removeOnListChangedCallback(callback);
        }
    }
    lists.clear();
    modCount += 1;
    listeners.notifyRemoved(this, 0, removedCount);
}
Removes all items and lists from the merge list .
118
11
154,004
public static void endTransitions ( final @ NonNull ViewGroup sceneRoot ) { sPendingTransitions . remove ( sceneRoot ) ; final ArrayList < Transition > runningTransitions = getRunningTransitions ( sceneRoot ) ; if ( ! runningTransitions . isEmpty ( ) ) { // Make a copy in case this is called by an onTransitionEnd listener ArrayList < Transition > copy = new ArrayList ( runningTransitions ) ; for ( int i = copy . size ( ) - 1 ; i >= 0 ; i -- ) { final Transition transition = copy . get ( i ) ; transition . forceToEnd ( sceneRoot ) ; } } }
Ends all pending and ongoing transitions on the specified scene root .
138
13
154,005
/**
 * Sets the play order of this set's child transitions.
 *
 * @param ordering ORDERING_TOGETHER or ORDERING_SEQUENTIAL
 * @return this TransitionSet, for chaining
 * @throws AndroidRuntimeException for any other ordering value
 */
@NonNull
public TransitionSet setOrdering(int ordering) {
    if (ordering == ORDERING_SEQUENTIAL) {
        mPlayTogether = false;
    } else if (ordering == ORDERING_TOGETHER) {
        mPlayTogether = true;
    } else {
        throw new AndroidRuntimeException("Invalid parameter for TransitionSet ordering: " + ordering);
    }
    return this;
}
Sets the play order of this set's child transitions.
82
12
154,006
/**
 * Returns the child Transition at the specified position in the TransitionSet, or null when
 * the index is out of range.
 */
@Nullable
public Transition getTransitionAt(int index) {
    boolean inRange = index >= 0 && index < mTransitions.size();
    return inRange ? mTransitions.get(index) : null;
}
Returns the child Transition at the specified position in the TransitionSet .
46
13
154,007
private static void extract ( String s , int start , ExtractFloatResult result ) { // Now looking for ' ', ',', '.' or '-' from the start. int currentIndex = start ; boolean foundSeparator = false ; result . mEndWithNegOrDot = false ; boolean secondDot = false ; boolean isExponential = false ; for ( ; currentIndex < s . length ( ) ; currentIndex ++ ) { boolean isPrevExponential = isExponential ; isExponential = false ; char currentChar = s . charAt ( currentIndex ) ; switch ( currentChar ) { case ' ' : case ' ' : foundSeparator = true ; break ; case ' ' : // The negative sign following a 'e' or 'E' is not a separator. if ( currentIndex != start && ! isPrevExponential ) { foundSeparator = true ; result . mEndWithNegOrDot = true ; } break ; case ' ' : if ( ! secondDot ) { secondDot = true ; } else { // This is the second dot, and it is considered as a separator. foundSeparator = true ; result . mEndWithNegOrDot = true ; } break ; case ' ' : case ' ' : isExponential = true ; break ; } if ( foundSeparator ) { break ; } } // When there is nothing found, then we put the end position to the end // of the string. result . mEndPosition = currentIndex ; }
Calculate the position of the next comma or space or negative sign
321
14
154,008
protected void runAnimators ( ) { if ( DBG ) { Log . d ( LOG_TAG , "runAnimators() on " + this ) ; } start ( ) ; ArrayMap < Animator , AnimationInfo > runningAnimators = getRunningAnimators ( ) ; // Now start every Animator that was previously created for this transition for ( Animator anim : mAnimators ) { if ( DBG ) { Log . d ( LOG_TAG , " anim: " + anim ) ; } if ( runningAnimators . containsKey ( anim ) ) { start ( ) ; runAnimator ( anim , runningAnimators ) ; } } mAnimators . clear ( ) ; end ( ) ; }
This is called internally once all animations have been set up by the transition hierarchy .
148
16
154,009
/**
 * Called automatically by the transition and TransitionSet classes prior to a Transition
 * subclass starting; notifies listeners on the outermost start and tracks nesting depth.
 * Subclasses should not need to call it directly.
 */
protected void start() {
    if (mNumInstances == 0) {
        boolean hasListeners = mListeners != null && mListeners.size() > 0;
        if (hasListeners) {
            // Iterate over a clone so listeners may unregister themselves safely.
            ArrayList<TransitionListener> listenersCopy =
                    (ArrayList<TransitionListener>) mListeners.clone();
            for (int i = 0, count = listenersCopy.size(); i < count; ++i) {
                listenersCopy.get(i).onTransitionStart(this);
            }
        }
        mEnded = false;
    }
    mNumInstances++;
}
This method is called automatically by the transition and TransitionSet classes prior to a Transition subclass starting ; subclasses should not need to call it directly .
124
29
154,010
/**
 * Cancels a transition that is currently running: cancels every live animator (newest first)
 * and notifies the registered listeners.
 */
protected void cancel() {
    for (int i = mCurrentAnimators.size() - 1; i >= 0; i--) {
        Animator animator = mCurrentAnimators.get(i);
        animator.cancel();
    }
    if (mListeners != null && mListeners.size() > 0) {
        // Iterate over a clone so listeners may unregister themselves safely.
        ArrayList<TransitionListener> listenersCopy =
                (ArrayList<TransitionListener>) mListeners.clone();
        for (int i = 0, count = listenersCopy.size(); i < count; ++i) {
            listenersCopy.get(i).onTransitionCancel(this);
        }
    }
}
This method cancels a transition that is currently running .
158
11
154,011
/**
 * Adds a single Point to this batch; the batch's tags are copied onto the point.
 *
 * @return this BatchPoints, for chaining
 */
public BatchPoints point(final Point point) {
    point.getTags().putAll(this.tags);
    this.points.add(point);
    return this;
}
Add a single Point to these batches .
39
8
154,012
/**
 * Calculates the line protocol for all points in this batch, one point per line.
 */
public String lineProtocol() {
    StringBuilder builder = new StringBuilder();
    for (Point point : this.points) {
        builder.append(point.lineProtocol(this.precision)).append("\n");
    }
    return builder.toString();
}
calculate the lineprotocol for all Points .
64
11
154,013
/**
 * Tests whether two BatchPoints can be merged: same database, retention policy, tags and
 * consistency level.
 */
public boolean isMergeAbleWith(final BatchPoints that) {
    boolean sameDatabase = Objects.equals(database, that.database);
    boolean samePolicy = Objects.equals(retentionPolicy, that.retentionPolicy);
    boolean sameTags = Objects.equals(tags, that.tags);
    return sameDatabase && samePolicy && sameTags && consistency == that.consistency;
}
Test whether is possible to merge two BatchPoints objects .
59
12
154,014
/**
 * Merges the other batch's points into this one when the two are compatible.
 *
 * @return true when the merge happened
 */
public boolean mergeIn(final BatchPoints that) {
    if (!isMergeAbleWith(that)) {
        return false;
    }
    this.points.addAll(that.points);
    return true;
}
Merge two BatchPoints objects .
53
8
154,015
/**
 * Lazily traverses the whole MessagePack stream; useful for converting query results
 * delivered in chunks. IOExceptions during hasNext() are wrapped in InfluxDBException.
 */
public Iterable<QueryResult> traverse(final InputStream is) {
    MessageUnpacker unpacker = MessagePack.newDefaultUnpacker(is);
    return () -> new Iterator<QueryResult>() {
        @Override
        public boolean hasNext() {
            try {
                return unpacker.hasNext();
            } catch (IOException e) {
                throw new InfluxDBException(e);
            }
        }

        @Override
        public QueryResult next() {
            return parse(unpacker);
        }
    };
}
Traverse over the whole message pack stream . This method can be used for converting query results in chunk .
117
21
154,016
/**
 * Parses a MessagePack stream that carries exactly one QueryResult (non-chunked responses).
 */
public QueryResult parse(final InputStream is) {
    return parse(MessagePack.newDefaultUnpacker(is));
}
Parse the message pack stream . This method can be used for converting query result from normal query response where exactly one QueryResult returned
40
26
154,017
/**
 * Enforces that the number is strictly larger than 0.
 *
 * @throws IllegalArgumentException if number is null or not positive
 */
public static void checkPositiveNumber(final Number number, final String name)
        throws IllegalArgumentException {
    boolean invalid = number == null || number.doubleValue() <= 0;
    if (invalid) {
        throw new IllegalArgumentException("Expecting a positive number for " + name);
    }
}
Enforces that the number is larger than 0 .
59
10
154,018
/**
 * Enforces that the number is zero or positive.
 *
 * @throws IllegalArgumentException if number is null or negative
 */
public static void checkNotNegativeNumber(final Number number, final String name)
        throws IllegalArgumentException {
    boolean invalid = number == null || number.doubleValue() < 0;
    if (invalid) {
        throw new IllegalArgumentException("Expecting a positive or zero number for " + name);
    }
}
Enforces that the number is not negative .
62
9
154,019
/**
 * Enforces that the string is a valid InfluxDB duration (digits followed by w/d/m/h/s units,
 * repeated) or the literal "inf".
 *
 * @throws IllegalArgumentException if the duration does not match
 */
public static void checkDuration(final String duration, final String name)
        throws IllegalArgumentException {
    boolean valid = duration.matches("(\\d+[wdmhs])+|inf");
    if (!valid) {
        throw new IllegalArgumentException("Invalid InfluxDB duration: " + duration + " for " + name);
    }
}
Enforces that the duration is a valid influxDB duration .
70
12
154,020
/**
 * Returns a copy of these options with the given jitter: the flush interval is delayed by a
 * random amount bounded by this value, avoiding synchronized write spikes across many
 * client instances (e.g. jitter 5s + flush 10s means flushes every 10-15s).
 */
public BatchOptions jitterDuration(final int jitterDuration) {
    BatchOptions copy = getClone();
    copy.jitterDuration = jitterDuration;
    return copy;
}
Jitters the batch flush interval by a random amount . This is primarily to avoid large write spikes for users running a large number of client instances . ie a jitter of 5s and flush duration 10s means flushes will happen every 10 - 15s .
40
52
154,021
/**
 * Returns a copy of these options with the given buffer limit for failed writes that will be
 * retried later (helps ride out temporary network problems or server load spikes). When the
 * buffer is full, the oldest entries are dropped as new points arrive.
 */
public BatchOptions bufferLimit(final int bufferLimit) {
    BatchOptions copy = getClone();
    copy.bufferLimit = bufferLimit;
    return copy;
}
The client maintains a buffer for failed writes so that the writes will be retried later on . This may help to overcome temporary network problems or InfluxDB load spikes . When the buffer is full and new points are written oldest entries in the buffer are lost .
36
52
154,022
/**
 * Builds the service call for the given query: falls back to the default database, posts the
 * bound parameters when present, and otherwise chooses GET or POST as the query requires.
 */
private Call<QueryResult> callQuery(final Query query) {
    String db = query.getDatabase();
    if (db == null) {
        db = this.database;
    }
    String encodedCommand = query.getCommandWithUrlEncoded();
    if (query instanceof BoundParameterQuery) {
        BoundParameterQuery boundQuery = (BoundParameterQuery) query;
        return this.influxDBService.postQuery(db, encodedCommand,
                boundQuery.getParameterJsonWithUrlEncoded());
    }
    if (query.requiresPost()) {
        return this.influxDBService.postQuery(db, encodedCommand);
    }
    return this.influxDBService.query(db, encodedCommand);
}
Calls the influxDBService for the query .
186
12
154,023
/**
 * Creates the matching InfluxDBException from a MessagePack-encoded error body by extracting
 * its "error" entry; any decoding failure is itself wrapped in an InfluxDBException.
 */
public static InfluxDBException buildExceptionForErrorState(final InputStream messagePackErrorBody) {
    try {
        MessageUnpacker unpacker = MessagePack.newDefaultUnpacker(messagePackErrorBody);
        ImmutableMapValue mapValue = (ImmutableMapValue) unpacker.unpackValue();
        String errorMessage = mapValue.map().get(new ImmutableStringValueImpl("error")).toString();
        return InfluxDBException.buildExceptionFromErrorMessage(errorMessage);
    } catch (Exception e) {
        return new InfluxDBException(e);
    }
}
Create corresponding InfluxDBException from the message pack error body .
128
13
154,024
/**
 * Puts a single BatchEntry into the queue for later processing; once the queue reaches the
 * configured action count, a write is scheduled.
 *
 * Fix: restore the thread's interrupt status before rethrowing — swallowing
 * InterruptedException hides the interrupt from callers up the stack.
 */
void put(final AbstractBatchEntry batchEntry) {
    try {
        this.queue.put(batchEntry);
    } catch (InterruptedException e) {
        Thread.currentThread().interrupt();
        throw new RuntimeException(e);
    }
    if (this.queue.size() >= this.actions) {
        this.scheduler.submit(new Runnable() {
            @Override
            public void run() {
                write();
            }
        });
    }
}
Put a single BatchEntry to the cache for later processing .
90
13
154,025
/**
 * Creates a new Point builder in a fluent manner from a POJO, taking the measurement name
 * from the class's Measurement annotation.
 *
 * @throws NullPointerException if clazz is null
 */
public static Builder measurementByPOJO(final Class<?> clazz) {
    Objects.requireNonNull(clazz, "clazz");
    throwExceptionIfMissingAnnotation(clazz, Measurement.class);
    return new Builder(findMeasurementName(clazz));
}
Create a new Point builder to create a new Point in a fluent manner from a POJO.
70
20
154,026
/**
 * Re-applies the saved view commands to the attached view, if any commands were recorded.
 */
protected void restoreState(View view, Set<ViewCommand<View>> currentState) {
    if (!mViewCommands.isEmpty()) {
        mViewCommands.reapply(view, currentState);
    }
}
Apply saved state to attached view
51
6
154,027
/**
 * Attaches a view to this view state and replays the saved state to it. A view that is
 * already attached is ignored.
 *
 * @throws IllegalArgumentException if view is null
 */
public void attachView(View view) {
    if (view == null) {
        throw new IllegalArgumentException("Mvp view must be not null");
    }
    if (!mViews.add(view)) {
        // Already attached; nothing to restore.
        return;
    }
    mInRestoreState.add(view);
    Set<ViewCommand<View>> savedState = mViewStates.get(view);
    if (savedState == null) {
        savedState = Collections.<ViewCommand<View>>emptySet();
    }
    restoreState(view, savedState);
    mViewStates.remove(view);
    mInRestoreState.remove(view);
}
Attach view to view state and apply saved state
146
9
154,028
/**
 * Stores the presenter instance in this storage under the given tag.
 */
public <T extends MvpPresenter> void add(String tag, T instance) {
    mPresenters.put(tag, instance);
}
Add presenter to storage
31
4
154,029
@ SuppressWarnings ( "unused" ) public boolean isInRestoreState ( View view ) { //noinspection SimplifiableIfStatement if ( mViewState != null ) { return mViewState . isInRestoreState ( view ) ; } return false ; }
Check if view is in restore state or not
60
9
154,030
/**
 * Binds the given view state to this presenter, also exposing it as the presenter's view.
 */
@SuppressWarnings({"unchecked", "unused"})
public void setViewState(MvpViewState<View> viewState) {
    mViewStateAsView = (View) viewState;
    mViewState = (MvpViewState) viewState;
}
Set view state to presenter
62
5
154,031
/**
 * Checks whether a generated MoxyReflector class is available without using the reflection
 * API (instantiation either succeeds or raises NoClassDefFoundError). The result is cached.
 */
private static boolean hasMoxyReflector() {
    if (hasMoxyReflector == null) {
        try {
            new MoxyReflector();
            hasMoxyReflector = true;
        } catch (NoClassDefFoundError error) {
            hasMoxyReflector = false;
        }
    }
    return hasMoxyReflector;
}
Check is it have generated MoxyReflector without usage of reflection API
83
15
154,032
/**
 * Saves this delegate's tag (and recursively its children's) into the instance state so
 * presenters can be restored after the delegate is recreated. The root delegate nests
 * everything under its own dedicated bundle.
 */
public void onSaveInstanceState(Bundle outState) {
    if (mParentDelegate == null) {
        // Root delegate: write into a dedicated nested bundle.
        Bundle moxyDelegateBundle = new Bundle();
        outState.putBundle(MOXY_DELEGATE_TAGS_KEY, moxyDelegateBundle);
        outState = moxyDelegateBundle;
    }
    outState.putAll(mBundle);
    outState.putString(mKeyTag, mDelegateTag);
    for (MvpDelegate childDelegate : mChildDelegates) {
        childDelegate.onSaveInstanceState(outState);
    }
}
Save presenters' tag prefix to the saved state so presenters can be restored after the delegate is recreated
132
17
154,033
private static SortedMap < TypeElement , List < TypeElement > > getPresenterBinders ( List < TypeElement > presentersContainers ) { Map < TypeElement , TypeElement > extendingMap = new HashMap <> ( ) ; for ( TypeElement presentersContainer : presentersContainers ) { TypeMirror superclass = presentersContainer . getSuperclass ( ) ; TypeElement parent = null ; while ( superclass . getKind ( ) == TypeKind . DECLARED ) { TypeElement superclassElement = ( TypeElement ) ( ( DeclaredType ) superclass ) . asElement ( ) ; if ( presentersContainers . contains ( superclassElement ) ) { parent = superclassElement ; break ; } superclass = superclassElement . getSuperclass ( ) ; } extendingMap . put ( presentersContainer , parent ) ; } // TreeMap for sorting SortedMap < TypeElement , List < TypeElement > > elementListMap = new TreeMap <> ( TYPE_ELEMENT_COMPARATOR ) ; for ( TypeElement presentersContainer : presentersContainers ) { ArrayList < TypeElement > typeElements = new ArrayList <> ( ) ; typeElements . add ( presentersContainer ) ; TypeElement key = presentersContainer ; while ( ( key = extendingMap . get ( key ) ) != null ) { typeElements . add ( key ) ; } elementListMap . put ( presentersContainer , typeElements ) ; } return elementListMap ; }
Collects presenter binders from superclasses that are also presenter containers .
321
14
154,034
/**
 * Records, in both directions, that the given delegate tag injected the presenter:
 * presenter -> tags and tag -> presenters.
 */
public void injectPresenter(MvpPresenter<?> presenter, String delegateTag) {
    Set<String> tagsOfPresenter = mConnections.get(presenter);
    if (tagsOfPresenter == null) {
        tagsOfPresenter = new HashSet<>();
        mConnections.put(presenter, tagsOfPresenter);
    }
    tagsOfPresenter.add(delegateTag);

    Set<MvpPresenter> presentersOfTag = mTags.get(delegateTag);
    if (presentersOfTag == null) {
        presentersOfTag = new HashSet<>();
        mTags.put(delegateTag, presentersOfTag);
    }
    presentersOfTag.add(presenter);
}
Save delegate tag when it injects a presenter into the delegate's object
137
11
154,035
/**
 * Removes the tag binding when the delegate's object is fully destroyed.
 *
 * @return true when the presenter no longer has any bound tags
 */
public boolean rejectPresenter(MvpPresenter<?> presenter, String delegateTag) {
    Set<MvpPresenter> boundPresenters = mTags.get(delegateTag);
    if (boundPresenters != null) {
        boundPresenters.remove(presenter);
    }
    if (boundPresenters == null || boundPresenters.isEmpty()) {
        mTags.remove(delegateTag);
    }

    Set<String> tagsOfPresenter = mConnections.get(presenter);
    if (tagsOfPresenter == null) {
        mConnections.remove(presenter);
        return true;
    }
    // Drop every tag sharing the delegate's prefix (covers child-delegate tags too).
    tagsOfPresenter.removeIf(tag -> tag.startsWith(delegateTag));
    boolean orphaned = tagsOfPresenter.isEmpty();
    if (orphaned) {
        mConnections.remove(presenter);
    }
    return orphaned;
}
Remove tag when delegate's object was fully destroyed
209
9
154,036
/**
 * Binds the current thread to this reservable lock (or the whole core when wholeCore is
 * true) and pins the OS-level CPU affinity.
 *
 * @throws IllegalStateException if the cpu is already bound to a live thread
 */
public void bind(boolean wholeCore) {
    if (bound && assignedThread != null && assignedThread.isAlive()) {
        throw new IllegalStateException("cpu " + cpuId + " already bound to " + assignedThread);
    }
    if (areAssertionsEnabled()) {
        // Remember the binding site for diagnostics.
        boundHere = new Throwable("Bound here");
    }
    if (wholeCore) {
        lockInventory.bindWholeCore(cpuId);
    } else if (cpuId >= 0) {
        bound = true;
        assignedThread = Thread.currentThread();
        LOGGER.info("Assigning cpu {} to {}", cpuId, assignedThread);
    }
    if (cpuId >= 0) {
        BitSet affinity = new BitSet();
        affinity.set(cpuId, true);
        Affinity.setAffinity(affinity);
    }
}
Bind the current thread to this reservable lock .
178
10
154,037
/**
 * Creates a hexadecimal representation of the bit set, one Long.toHexString word at a time.
 * Returns an empty string for an empty bit set.
 *
 * Fix (idiom): hex digits are plain ASCII, so the ByteArrayOutputStream/PrintWriter/charset
 * round-trip is unnecessary — a StringBuilder produces the identical result.
 */
public static String toHexString(final BitSet set) {
    final StringBuilder sb = new StringBuilder();
    for (long word : set.toLongArray()) {
        sb.append(Long.toHexString(word));
    }
    return sb.toString();
}
Creates a hexadecimal representation of the bit set
118
11
154,038
/**
 * Computes, for the given track, how many samples fall into each chunk as delimited by the
 * fragmenter's 1-based chunk start sample numbers.
 */
int[] getChunkSizes(Track track) {
    long[] chunkStarts = fragmenter.sampleNumbers(track);
    int[] chunkSizes = new int[chunkStarts.length];
    for (int i = 0; i < chunkStarts.length; i++) {
        long start = chunkStarts[i] - 1; // chunk starts are 1-based sample numbers
        boolean isLastChunk = chunkStarts.length == i + 1;
        long end = isLastChunk ? track.getSamples().size() : chunkStarts[i + 1] - 1;
        chunkSizes[i] = l2i(end - start);
    }
    assert DefaultMp4Builder.this.track2Sample.get(track).size() == sum(chunkSizes)
            : "The number of samples and the sum of all chunk lengths must be equal";
    return chunkSizes;
}
Gets the chunk sizes for the given track .
204
10
154,039
private void print ( FileChannel fc , int level , long start , long end ) throws IOException { fc . position ( start ) ; if ( end <= 0 ) { end = start + fc . size ( ) ; System . out . println ( "Setting END to " + end ) ; } while ( end - fc . position ( ) > 8 ) { long begin = fc . position ( ) ; ByteBuffer bb = ByteBuffer . allocate ( 8 ) ; fc . read ( bb ) ; bb . rewind ( ) ; long size = IsoTypeReader . readUInt32 ( bb ) ; String type = IsoTypeReader . read4cc ( bb ) ; long fin = begin + size ; // indent by the required number of spaces for ( int i = 0 ; i < level ; i ++ ) { System . out . print ( " " ) ; } System . out . println ( type + "@" + ( begin ) + " size: " + size ) ; if ( containers . contains ( type ) ) { print ( fc , level + 1 , begin + 8 , fin ) ; if ( fc . position ( ) != fin ) { System . out . println ( "End of container contents at " + fc . position ( ) ) ; System . out . println ( " FIN = " + fin ) ; } } fc . position ( fin ) ; } }
Parses the FileChannel in the range [ start end ) and prints the elements found
299
18
154,040
// Reads one box from the channel: the 8-byte size|type header, then (for size == 1) the
// 64-bit largesize, and (for uuid boxes) the 16-byte usertype. Either creates a SkipBox for
// configured skipped types or asks createBox() for the concrete box, then delegates content
// parsing to that box.
// NOTE(review): returns null when the 32-bit size fails the plausibility check (1 < size < 8);
// a size of 0 ("till end of file") is not supported and raises RuntimeException.
public ParsableBox parseBox ( ReadableByteChannel byteChannel , String parentType ) throws IOException { header . get ( ) . rewind ( ) . limit ( 8 ) ; int bytesRead = 0 ; int b ; while ( ( b = byteChannel . read ( header . get ( ) ) ) + bytesRead < 8 ) { if ( b < 0 ) { throw new EOFException ( ) ; } else { bytesRead += b ; } } header . get ( ) . rewind ( ) ; long size = IsoTypeReader . readUInt32 ( header . get ( ) ) ; // do plausibility check if ( size < 8 && size > 1 ) { LOG . error ( "Plausibility check failed: size < 8 (size = {}). Stop parsing!" , size ) ; return null ; } String type = IsoTypeReader . read4cc ( header . get ( ) ) ; //System.err.println(type); byte [ ] usertype = null ; long contentSize ; if ( size == 1 ) { header . get ( ) . limit ( 16 ) ; byteChannel . read ( header . get ( ) ) ; header . get ( ) . position ( 8 ) ; size = IsoTypeReader . readUInt64 ( header . get ( ) ) ; contentSize = size - 16 ; } else if ( size == 0 ) { throw new RuntimeException ( "box size of zero means 'till end of file. That is not yet supported" ) ; } else { contentSize = size - 8 ; } if ( UserBox . TYPE . equals ( type ) ) { header . get ( ) . limit ( header . get ( ) . limit ( ) + 16 ) ; byteChannel . read ( header . get ( ) ) ; usertype = new byte [ 16 ] ; for ( int i = header . get ( ) . position ( ) - 16 ; i < header . get ( ) . position ( ) ; i ++ ) { usertype [ i - ( header . get ( ) . position ( ) - 16 ) ] = header . get ( ) . get ( i ) ; } contentSize -= 16 ; } ParsableBox parsableBox = null ; if ( skippedTypes != null && skippedTypes . contains ( type ) ) { LOG . trace ( "Skipping box {} {} {}" , type , usertype , parentType ) ; parsableBox = new SkipBox ( type , usertype , parentType ) ; } else { LOG . 
trace ( "Creating box {} {} {}" , type , usertype , parentType ) ; parsableBox = createBox ( type , usertype , parentType ) ; } //LOG.finest("Parsing " + box.getType()); // System.out.println("parsing " + Mp4Arrays.toString(box.getType()) + " " + box.getClass().getName() + " size=" + size); header . get ( ) . rewind ( ) ; parsableBox . parse ( byteChannel , header . get ( ) , contentSize , this ) ; return parsableBox ; }
Parses the next size and type creates a box instance and parses the box s content .
654
20
154,041
/**
 * Calculates the timestamps of the sync samples of every track in the movie that shares the
 * given track's handler, one long[] per matching track with sync samples.
 */
public static List<long[]> getSyncSamplesTimestamps(Movie movie, Track track) {
    List<long[]> times = new LinkedList<long[]>();
    for (Track candidate : movie.getTracks()) {
        if (!candidate.getHandler().equals(track.getHandler())) {
            continue;
        }
        long[] syncSamples = candidate.getSyncSamples();
        if (syncSamples != null && syncSamples.length > 0) {
            final long[] candidateTimes = getTimes(candidate, movie);
            times.add(candidateTimes);
        }
    }
    return times;
}
Calculates the timestamps of all tracks' sync samples.
152
11
154,042
/**
 * Decompresses the run-length encoded composition-time entries into one offset per sample.
 */
public static int[] blowupCompositionTimes(List<CompositionTimeToSample.Entry> entries) {
    long sampleCount = 0;
    for (CompositionTimeToSample.Entry entry : entries) {
        sampleCount += entry.getCount();
    }
    assert sampleCount <= Integer.MAX_VALUE;
    int[] compositionTimes = new int[(int) sampleCount];
    int next = 0;
    for (CompositionTimeToSample.Entry entry : entries) {
        for (int i = 0; i < entry.getCount(); i++) {
            compositionTimes[next++] = entry.getOffset();
        }
    }
    return compositionTimes;
}
Decompresses the list of entries and returns the list of composition times .
149
15
154,043
/**
 * Reads a zero-terminated UTF-8 string starting at the buffer's current position; the
 * terminating NUL byte is consumed but not included.
 */
public static String readString(ByteBuffer byteBuffer) {
    ByteArrayOutputStream out = new ByteArrayOutputStream();
    int b;
    while ((b = byteBuffer.get()) != 0) {
        out.write(b);
    }
    return Utf8.convert(out.toByteArray());
}
Reads a zero terminated UTF - 8 string .
70
10
154,044
protected boolean isChunkReady ( StreamingTrack streamingTrack , StreamingSample next ) { long ts = nextSampleStartTime . get ( streamingTrack ) ; long cfst = nextChunkCreateStartTime . get ( streamingTrack ) ; return ( ts >= cfst + 2 * streamingTrack . getTimescale ( ) ) ; // chunk interleave of 2 seconds }
Tests if the currently received samples for a given track already form a chunk as we want to have it. The next sample will not be part of the chunk and will be added to the fragment buffer later.
75
41
154,045
protected boolean isFragmentReady ( StreamingTrack streamingTrack , StreamingSample next ) { long ts = nextSampleStartTime . get ( streamingTrack ) ; long cfst = nextFragmentCreateStartTime . get ( streamingTrack ) ; if ( ( ts > cfst + 3 * streamingTrack . getTimescale ( ) ) ) { // mininum fragment length == 3 seconds SampleFlagsSampleExtension sfExt = next . getSampleExtension ( SampleFlagsSampleExtension . class ) ; if ( sfExt == null || sfExt . isSyncSample ( ) ) { //System.err.println(streamingTrack + " ready at " + ts); // the next sample needs to be a sync sample // when there is no SampleFlagsSampleExtension we assume syncSample == true return true ; } } return false ; }
Tests if the currently received samples for a given track form a valid fragment taking the latest received sample into account . The next sample is not part of the segment and will be added to the fragment buffer later .
175
42
154,046
/**
 * Gets the byte sizes of the given range of samples of the track.
 */
protected long[] getSampleSizes(long startSample, long endSample, Track track, int sequenceNumber) {
    List<Sample> samples = getSamples(startSample, endSample, track);
    long[] sizes = new long[samples.size()];
    for (int i = 0; i < sizes.length; i++) {
        sizes[i] = samples.get(i).getSize();
    }
    return sizes;
}
Gets the sizes of a sequence of samples .
106
10
154,047
protected ParsableBox createMoof ( long startSample , long endSample , Track track , int sequenceNumber ) { MovieFragmentBox moof = new MovieFragmentBox ( ) ; createMfhd ( startSample , endSample , track , sequenceNumber , moof ) ; createTraf ( startSample , endSample , track , sequenceNumber , moof ) ; TrackRunBox firstTrun = moof . getTrackRunBoxes ( ) . get ( 0 ) ; firstTrun . setDataOffset ( 1 ) ; // dummy to make size correct firstTrun . setDataOffset ( ( int ) ( 8 + moof . getSize ( ) ) ) ; // mdat header + moof size return moof ; }
Creates a moof box for a given sequence of samples .
155
13
154,048
protected ParsableBox createMvhd ( Movie movie ) { MovieHeaderBox mvhd = new MovieHeaderBox ( ) ; mvhd . setVersion ( 1 ) ; mvhd . setCreationTime ( getDate ( ) ) ; mvhd . setModificationTime ( getDate ( ) ) ; mvhd . setDuration ( 0 ) ; //no duration in moov for fragmented movies long movieTimeScale = movie . getTimescale ( ) ; mvhd . setTimescale ( movieTimeScale ) ; // find the next available trackId long nextTrackId = 0 ; for ( Track track : movie . getTracks ( ) ) { nextTrackId = nextTrackId < track . getTrackMetaData ( ) . getTrackId ( ) ? track . getTrackMetaData ( ) . getTrackId ( ) : nextTrackId ; } mvhd . setNextTrackId ( ++ nextTrackId ) ; return mvhd ; }
Creates a single mvhd movie header box for a given movie .
204
15
154,049
/**
 * Parses the raw content of the box exactly once: delegates to _parseDetails and keeps any
 * trailing unparsed bytes as deadBytes. A no-op when the content was already consumed.
 */
public synchronized final void parseDetails() {
    LOG.debug("parsing details of {}", this.getType());
    if (content == null) {
        return;
    }
    ByteBuffer raw = this.content;
    isParsed = true;
    raw.rewind();
    _parseDetails(raw);
    if (raw.remaining() > 0) {
        // Preserve unparsed tail so the box can be re-serialized byte-exactly.
        deadBytes = raw.slice();
    }
    this.content = null;
    assert verify(raw);
}
Parses the raw content of the box . It surrounds the actual parsing which is done
101
18
154,050
public long getSize ( ) { long size = isParsed ? getContentSize ( ) : content . limit ( ) ; size += ( 8 + // size|type ( size >= ( ( 1L << 32 ) - 8 ) ? 8 : 0 ) + // 32bit - 8 byte size and type ( UserBox . TYPE . equals ( getType ( ) ) ? 16 : 0 ) ) ; size += ( deadBytes == null ? 0 : deadBytes . limit ( ) ) ; return size ; }
Gets the full size of the box including header and content .
106
13
154,051
// Verifies that the box can be reconstructed byte-exactly after parsing: re-serializes the
// parsed box (plus dead bytes) and compares it back-to-front against the original content
// buffer, logging the first mismatch with hex dumps. Used from the assert in parseDetails().
private boolean verify ( ByteBuffer content ) { ByteBuffer bb = ByteBuffer . allocate ( l2i ( getContentSize ( ) + ( deadBytes != null ? deadBytes . limit ( ) : 0 ) ) ) ; getContent ( bb ) ; if ( deadBytes != null ) { deadBytes . rewind ( ) ; while ( deadBytes . remaining ( ) > 0 ) { bb . put ( deadBytes ) ; } } content . rewind ( ) ; bb . rewind ( ) ; if ( content . remaining ( ) != bb . remaining ( ) ) { LOG . error ( "{}: remaining differs {} vs. {}" , this . getType ( ) , content . remaining ( ) , bb . remaining ( ) ) ; return false ; } int p = content . position ( ) ; for ( int i = content . limit ( ) - 1 , j = bb . limit ( ) - 1 ; i >= p ; i -- , j -- ) { byte v1 = content . get ( i ) ; byte v2 = bb . get ( j ) ; if ( v1 != v2 ) { LOG . error ( "{}: buffers differ at {}: {}/{}" , this . getType ( ) , i , v1 , v2 ) ; byte [ ] b1 = new byte [ content . remaining ( ) ] ; byte [ ] b2 = new byte [ bb . remaining ( ) ] ; content . get ( b1 ) ; bb . get ( b2 ) ; LOG . error ( "original : {}" , Hex . encodeHex ( b1 , 4 ) ) ; LOG . error ( "reconstructed : {}" , Hex . encodeHex ( b2 , 4 ) ) ; return false ; } } return true ; }
Verifies that a box can be reconstructed byte - exact after parsing .
378
14
154,052
/**
 * Builds the array of all descriptor tags in the range [0x6A, 0xFE) — up to the
 * ExtDescrTagEndRange (0xFE).
 */
static int[] allTags() {
    int[] tagValues = new int[0xFE - 0x6A];
    for (int tag = 0x6A; tag < 0xFE; tag++) {
        final int pos = tag - 0x6A;
        LOG.trace("pos: {}", pos);
        tagValues[pos] = tag;
    }
    return tagValues;
}
ExtDescrTagEndRange = 0xFE
86
10
154,053
/**
 * Returns the names of all tags present in this Xtra box.
 */
public String[] getAllTagNames() {
    String[] names = new String[tags.size()];
    for (int i = 0; i < tags.size(); i++) {
        names[i] = tags.elementAt(i).tagName;
    }
    return names;
}
Returns a list of the tag names present in this Xtra Box
73
13
154,054
/**
 * Returns the first String value found for this tag, or null if there is none.
 */
public String getFirstStringValue(String name) {
    for (Object value : getValues(name)) {
        if (value instanceof String) {
            return (String) value;
        }
    }
    return null;
}
Returns the first String value found for this tag
52
9
154,055
/**
 * Returns the first Date value found for this tag, or null if there is none.
 */
public Date getFirstDateValue(String name) {
    for (Object value : getValues(name)) {
        if (value instanceof Date) {
            return (Date) value;
        }
    }
    return null;
}
Returns the first Date value found for this tag
52
9
154,056
/**
 * Returns the first Long value found for this tag, or null if there is none.
 */
public Long getFirstLongValue(String name) {
    for (Object value : getValues(name)) {
        if (value instanceof Long) {
            return (Long) value;
        }
    }
    return null;
}
Returns the first Long value found for this tag
52
9
154,057
/**
 * Returns all values of the named tag as objects; an empty array when the tag is absent.
 */
public Object[] getValues(String name) {
    XtraTag tag = getTagByName(name);
    if (tag == null) {
        return new Object[0];
    }
    Object[] values = new Object[tag.values.size()];
    for (int i = 0; i < values.length; i++) {
        values[i] = tag.values.elementAt(i).getValueAsObject();
    }
    return values;
}
Returns an array of values for this tag . Empty array when tag is not present
110
16
154,058
/**
 * Removes and recreates the named tag with the given String values.
 */
public void setTagValues(String name, String[] values) {
    removeTag(name);
    XtraTag tag = new XtraTag(name);
    for (String value : values) {
        tag.values.addElement(new XtraValue(value));
    }
    tags.addElement(tag);
}
Removes and recreates tag using specified String values
80
11
154,059
/**
 * Removes and recreates the named tag with the given single Date value.
 */
public void setTagValue(String name, Date date) {
    removeTag(name);
    XtraTag recreated = new XtraTag(name);
    recreated.values.addElement(new XtraValue(date));
    tags.addElement(recreated);
}
Removes and recreates tag using specified Date value
57
11
154,060
public void setTagValue ( String name , long value ) { removeTag ( name ) ; XtraTag tag = new XtraTag ( name ) ; tag . values . addElement ( new XtraValue ( value ) ) ; tags . addElement ( tag ) ; }
Removes and recreates tag using specified Long value
57
11
154,061
public long [ ] blowup ( int chunkCount ) { long [ ] numberOfSamples = new long [ chunkCount ] ; int j = 0 ; List < SampleToChunkBox . Entry > sampleToChunkEntries = new LinkedList < Entry > ( entries ) ; Collections . reverse ( sampleToChunkEntries ) ; Iterator < Entry > iterator = sampleToChunkEntries . iterator ( ) ; SampleToChunkBox . Entry currentEntry = iterator . next ( ) ; for ( int i = numberOfSamples . length ; i > 1 ; i -- ) { numberOfSamples [ i - 1 ] = currentEntry . getSamplesPerChunk ( ) ; if ( i == currentEntry . getFirstChunk ( ) ) { currentEntry = iterator . next ( ) ; } } numberOfSamples [ 0 ] = currentEntry . getSamplesPerChunk ( ) ; return numberOfSamples ; }
Decompresses the list of entries and returns the number of samples per chunk for every single chunk .
201
20
154,062
public static synchronized long [ ] blowupTimeToSamples ( List < TimeToSampleBox . Entry > entries ) { SoftReference < long [ ] > cacheEntry ; if ( ( cacheEntry = cache . get ( entries ) ) != null ) { long [ ] cacheVal ; if ( ( cacheVal = cacheEntry . get ( ) ) != null ) { return cacheVal ; } } long numOfSamples = 0 ; for ( TimeToSampleBox . Entry entry : entries ) { numOfSamples += entry . getCount ( ) ; } assert numOfSamples <= Integer . MAX_VALUE ; long [ ] decodingTime = new long [ ( int ) numOfSamples ] ; int current = 0 ; for ( TimeToSampleBox . Entry entry : entries ) { for ( int i = 0 ; i < entry . getCount ( ) ; i ++ ) { decodingTime [ current ++ ] = entry . getDelta ( ) ; } } cache . put ( entries , new SoftReference < long [ ] > ( decodingTime ) ) ; return decodingTime ; }
Decompresses the list of entries and returns the list of decoding times .
224
15
154,063
void register ( Object listener ) { Multimap < Class < ? > , Subscriber > listenerMethods = findAllSubscribers ( listener ) ; for ( Map . Entry < Class < ? > , Collection < Subscriber > > entry : listenerMethods . asMap ( ) . entrySet ( ) ) { Class < ? > eventType = entry . getKey ( ) ; Collection < Subscriber > eventMethodsInListener = entry . getValue ( ) ; CopyOnWriteArraySet < Subscriber > eventSubscribers = subscribers . get ( eventType ) ; if ( eventSubscribers == null ) { CopyOnWriteArraySet < Subscriber > newSet = new CopyOnWriteArraySet < Subscriber > ( ) ; eventSubscribers = MoreObjects . firstNonNull ( subscribers . putIfAbsent ( eventType , newSet ) , newSet ) ; } eventSubscribers . addAll ( eventMethodsInListener ) ; } }
Registers all subscriber methods on the given listener object .
206
11
154,064
public int deleteRow ( ) { // build the delete string String deleteString = "DELETE FROM " + tableName + this . generatePKWhere ( ) ; PreparedStatement ps = null ; // System.out.println("delete string "+deleteString); try { // fill the question marks ps = cConn . prepareStatement ( deleteString ) ; ps . clearParameters ( ) ; int i ; for ( int j = 0 ; j < primaryKeys . length ; j ++ ) { ps . setObject ( j + 1 , resultRowPKs [ aktRowNr ] [ j ] ) ; } // end of for (int i=0; i<primaryKeys.length; i++) ps . executeUpdate ( ) ; } catch ( SQLException e ) { ZaurusEditor . printStatus ( "SQL Exception: " + e . getMessage ( ) ) ; return 0 ; } finally { try { if ( ps != null ) { ps . close ( ) ; } } catch ( SQLException e ) { } } // delete the corresponding primary key values from resultRowPKs numberOfResult -- ; for ( int i = aktRowNr ; i < numberOfResult ; i ++ ) { for ( int j = 0 ; j < primaryKeys . length ; j ++ ) { resultRowPKs [ i ] [ j ] = resultRowPKs [ i + 1 ] [ j ] ; } } // there are the following outcomes after deleting aktRowNr: /* A B C D E F no rows left J N N N N N one row left - J N J N N deleted row was the last row - J J N N N deleted row was the pre-last - - - - J N first D X + D + * . D X X D D . D X + last X new numberOfResult 0 1 2 1 2 2 old aktRowNr 0 1 2 0 1 0 D - deleted row X - any one row + - one or more rows * - zero or more rows */ // A. return to the search panel and tell 'last row deleted' on the status line // B. show the previous row and disable previous button // C. show the previous row as akt row // D. show akt row and disable next button // E. show akt row and disable next button // F. show akt row // these actions reduce to the following actions for ZaurusEditor: // 1. show search panel // 2. disable previous button // 3. disable next button // 4. do nothing // and 1,2,3,4 are the possible return codes int actionCode ; if ( numberOfResult == 0 ) { // case A actionCode = 1 ; ZaurusEditor . printStatus ( "Last row was deleted." 
) ; return actionCode ; } else if ( numberOfResult == aktRowNr ) { // B or C // new aktRow is previous row aktRowNr -- ; if ( aktRowNr == 0 ) { // B actionCode = 2 ; } else { // C actionCode = 4 ; } // end of if (aktRowNr == 0) } else { // D, E, F if ( numberOfResult >= 2 && aktRowNr < numberOfResult - 1 ) { // F actionCode = 4 ; } else { actionCode = 3 ; } // end of else } this . showAktRow ( ) ; ZaurusEditor . printStatus ( "Row was deleted." ) ; return actionCode ; }
delete current row answer special action codes see comment below
742
10
154,065
public String getPrimaryKeysString ( ) { String result = "" ; for ( int i = 0 ; i < primaryKeys . length ; i ++ ) { if ( result != "" ) { result += ", " ; } result += primaryKeys [ i ] ; } // end of for (int i=0; i<primaryKeys.length; i++) return result ; }
answer a String containing a String list of primary keys i . e . pk1 pk2 pk3
77
23
154,066
public void insertNewRow ( ) { // reset all fields for ( int i = 0 ; i < komponente . length ; i ++ ) { komponente [ i ] . clearContent ( ) ; } // end of for (int i=0; i<komponente.length; i++) // reset the field for the primary keys for ( int i = 0 ; i < primaryKeys . length ; i ++ ) { komponente [ pkColIndex [ i ] ] . setEditable ( true ) ; } ZaurusEditor . printStatus ( "enter a new row for table " + tableName ) ; }
open the panel to insert a new row into the table
136
11
154,067
public boolean saveChanges ( ) { // the initial settings of the textfields counts with one // so a real change by the user needs as many changes as there are columns // System.out.print("Anderungen in den Feldern: "); // there are changes to the database // memorize all columns which have been changed int [ ] changedColumns = new int [ columns . length ] ; int countChanged = 0 ; // build the update string String updateString = "" ; for ( int i = 0 ; i < columns . length ; i ++ ) { if ( komponente [ i ] . hasChanged ( ) ) { if ( updateString != "" ) { updateString += ", " ; } updateString += columns [ i ] + "=?" ; changedColumns [ countChanged ++ ] = i ; } } // end of for (int i=0; i<columns.length; i++) if ( countChanged > 0 ) { updateString = "UPDATE " + tableName + " SET " + updateString + this . generatePKWhere ( ) ; PreparedStatement ps = null ; // System.out.println("update "+updateString); try { // fill the question marks ps = cConn . prepareStatement ( updateString ) ; ps . clearParameters ( ) ; int i ; for ( i = 0 ; i < countChanged ; i ++ ) { ps . setObject ( i + 1 , komponente [ changedColumns [ i ] ] . getContent ( ) ) ; // System.out.print(" changed feld "+komponente[changedColumns[i]].getContent()); } // end of for (int i=0; i<countChanged; i++) // System.out.println(); for ( int j = 0 ; j < primaryKeys . length ; j ++ ) { ps . setObject ( i + j + 1 , resultRowPKs [ aktRowNr ] [ j ] ) ; } // end of for (int i=0; i<primaryKeys.length; i++) ps . executeUpdate ( ) ; ZaurusEditor . printStatus ( "changed row was saved to table " + tableName ) ; return true ; } catch ( SQLException e ) { ZaurusEditor . printStatus ( "SQL Exception: " + e . getMessage ( ) ) ; return false ; } finally { try { if ( ps != null ) { ps . close ( ) ; } } catch ( SQLException e ) { } } } else { // System.out.println("no changes"); return true ; } // end of if (changed) }
answer true if the update succeeds
552
6
154,068
public boolean saveNewRow ( ) { // check the fields of the primary keys whether one is empty boolean onePKempty = false ; int tmp ; PreparedStatement ps = null ; for ( tmp = 0 ; tmp < primaryKeys . length ; tmp ++ ) { if ( komponente [ pkColIndex [ tmp ] ] . getContent ( ) . equals ( "" ) ) { onePKempty = true ; break ; } } if ( onePKempty ) { komponente [ pkColIndex [ tmp ] ] . requestFocus ( ) ; ZaurusEditor . printStatus ( "no value for primary key " + primaryKeys [ tmp ] ) ; return false ; } // end of if (onePKempty) // build the insert string String insertString = "INSERT INTO " + tableName + " VALUES(" ; for ( int j = 0 ; j < columns . length ; j ++ ) { if ( j > 0 ) { insertString += ", " ; } insertString += "?" ; } // end of for (int i=0; i<columns.length; i++) insertString += ")" ; // System.out.println("insert string "+insertString); try { // fill the question marks ps = cConn . prepareStatement ( insertString ) ; ps . clearParameters ( ) ; int i ; for ( i = 0 ; i < columns . length ; i ++ ) { ps . setObject ( i + 1 , komponente [ i ] . getContent ( ) ) ; } ps . executeUpdate ( ) ; ZaurusEditor . printStatus ( "new row was saved to table " + tableName ) ; return true ; } catch ( SQLException e ) { ZaurusEditor . printStatus ( "SQL Exception: " + e . getMessage ( ) ) ; return false ; } finally { try { if ( ps != null ) { ps . close ( ) ; } } catch ( SQLException e ) { } } }
answer true if saving succeeds
416
5
154,069
public int searchRows ( String [ ] words , boolean allWords , boolean ignoreCase , boolean noMatchWhole ) { // System.out.print("search in " + tableName + " for: "); // for (int i=0; i < words.length; i++) { // System.out.print(words[i]+", "); // } // System.out.println("allWords = "+allWords+", ignoreCase = "+ignoreCase+", noMatchWhole= "+noMatchWhole); String where = this . generateWhere ( words , allWords , ignoreCase , noMatchWhole ) ; Vector temp = new Vector ( 20 ) ; Statement stmt = null ; try { stmt = cConn . createStatement ( ) ; ResultSet rs = stmt . executeQuery ( "SELECT " + this . getPrimaryKeysString ( ) + " FROM " + tableName + where ) ; while ( rs . next ( ) ) { Object [ ] pkValues = new Object [ primaryKeys . length ] ; for ( int i = 0 ; i < primaryKeys . length ; i ++ ) { pkValues [ i ] = rs . getObject ( pkColIndex [ i ] + 1 ) ; } // end of for (int i=0; i<primaryKeys.length; i++) temp . addElement ( pkValues ) ; } rs . close ( ) ; } catch ( SQLException e ) { ZaurusEditor . printStatus ( "SQL Exception: " + e . getMessage ( ) ) ; return - 1 ; } finally { try { if ( stmt != null ) { stmt . close ( ) ; } } catch ( SQLException e ) { } } resultRowPKs = new Object [ temp . size ( ) ] [ primaryKeys . length ] ; numberOfResult = temp . size ( ) ; for ( int i = 0 ; i < primaryKeys . length ; i ++ ) { for ( int j = 0 ; j < temp . size ( ) ; j ++ ) { resultRowPKs [ j ] [ i ] = ( ( Object [ ] ) temp . elementAt ( j ) ) [ i ] ; } // end of for (int j=0; j<temp.size(); j++) } // end of for (int i=0; i<primaryKeys.length; i++) // prepare statement for fetching the result rows for later use String stmtString = "SELECT * FROM " + tableName ; try { pStmt = cConn . prepareStatement ( stmtString + this . generatePKWhere ( ) ) ; } catch ( SQLException e ) { System . out . println ( "SQL Exception: " + e . getMessage ( ) ) ; } // end of try-catch // System.out.println("prepared statement: "+stmtString); if ( numberOfResult > 0 ) { this . 
disablePKFields ( ) ; aktRowNr = 0 ; this . showAktRow ( ) ; } // end of if (numberOfResult > 0) // System.out.println("number of rows: "+numberOfResult); return numberOfResult ; }
answer the number of found rows - 1 if there is an SQL exception
675
14
154,070
private void disablePKFields ( ) { for ( int i = 0 ; i < primaryKeys . length ; i ++ ) { komponente [ pkColIndex [ i ] ] . setEditable ( false ) ; } // end of for (int i=0; i<columns.length; i++) }
set all fields for primary keys to not editable
69
10
154,071
private void fillZChoice ( ZaurusChoice zc , String tab , String col ) { Statement stmt = null ; try { if ( cConn == null ) { return ; } stmt = cConn . createStatement ( ) ; ResultSet rs = stmt . executeQuery ( "SELECT * FROM " + tab + " ORDER BY " + col ) ; ResultSetMetaData rsmd = rs . getMetaData ( ) ; int numberOfColumns = rsmd . getColumnCount ( ) ; int colIndex = rs . findColumn ( col ) ; while ( rs . next ( ) ) { String tmp = "" ; for ( int i = 1 ; i <= numberOfColumns ; i ++ ) { if ( i > 1 ) { tmp += "; " ; } tmp += rs . getString ( i ) ; } // end of for (int i=1; i<=numberOfColumns; i++) zc . add ( tmp , rs . getString ( colIndex ) ) ; } rs . close ( ) ; } catch ( SQLException e ) { System . out . println ( "SQL Exception: " + e . getMessage ( ) ) ; } finally { try { if ( stmt != null ) { stmt . close ( ) ; } } catch ( SQLException e ) { } } }
and the column values as values
284
6
154,072
private void fetchColumns ( ) { Vector temp = new Vector ( 20 ) ; Vector tempType = new Vector ( 20 ) ; try { if ( cConn == null ) { return ; } if ( dbmeta == null ) { dbmeta = cConn . getMetaData ( ) ; } ResultSet colList = dbmeta . getColumns ( null , null , tableName , "%" ) ; while ( colList . next ( ) ) { temp . addElement ( colList . getString ( "COLUMN_NAME" ) ) ; tempType . addElement ( new Short ( colList . getShort ( "DATA_TYPE" ) ) ) ; } colList . close ( ) ; } catch ( SQLException e ) { ZaurusEditor . printStatus ( "SQL Exception: " + e . getMessage ( ) ) ; } columns = new String [ temp . size ( ) ] ; temp . copyInto ( columns ) ; columnTypes = new short [ temp . size ( ) ] ; for ( int i = 0 ; i < columnTypes . length ; i ++ ) { columnTypes [ i ] = ( ( Short ) tempType . elementAt ( i ) ) . shortValue ( ) ; } }
fetch all column names
258
5
154,073
private String generateWhere ( String [ ] words , boolean allWords , boolean ignoreCase , boolean noMatchWhole ) { String result = "" ; // if all words must match use AND between the different conditions String join ; if ( allWords ) { join = " AND " ; } else { join = " OR " ; } // end of else for ( int wordInd = 0 ; wordInd < words . length ; wordInd ++ ) { String oneCondition = "" ; for ( int col = 0 ; col < columns . length ; col ++ ) { if ( oneCondition != "" ) { oneCondition += " OR " ; } if ( ignoreCase ) { if ( noMatchWhole ) { oneCondition += "LOWER(" + columns [ col ] + ") LIKE '%" + words [ wordInd ] . toLowerCase ( ) + "%'" ; } else { oneCondition += "LOWER(" + columns [ col ] + ") LIKE '" + words [ wordInd ] . toLowerCase ( ) + "'" ; } } else { if ( noMatchWhole ) { oneCondition += columns [ col ] + " LIKE '%" + words [ wordInd ] + "%'" ; } else { oneCondition += columns [ col ] + " LIKE '" + words [ wordInd ] + "'" ; } } } if ( result != "" ) { result += join ; } result += "(" + oneCondition + ")" ; } if ( result != "" ) { result = " WHERE " + result ; } // end of if (result != "") // System.out.println("result: "+result); return result ; }
generate the Where - condition for the words
342
9
154,074
private int getColIndex ( String name ) { for ( int i = 0 ; i < columns . length ; i ++ ) { if ( name . equals ( columns [ i ] ) ) { return i ; } // end of if (name.equals(columns[i])) } // end of for (int i=0; i<columns.length; i++) return - 1 ; }
answer the index of the column named name in the actual table
85
12
154,075
private int getColIndex ( String colName , String tabName ) { int ordPos = 0 ; try { if ( cConn == null ) { return - 1 ; } if ( dbmeta == null ) { dbmeta = cConn . getMetaData ( ) ; } ResultSet colList = dbmeta . getColumns ( null , null , tabName , colName ) ; colList . next ( ) ; ordPos = colList . getInt ( "ORDINAL_POSITION" ) ; colList . close ( ) ; } catch ( SQLException e ) { System . out . println ( "SQL Exception: " + e . getMessage ( ) ) ; } return ordPos - 1 ; }
answer the index of the column named colName in the table tabName
150
14
154,076
private int getConstraintIndex ( int colIndex ) { for ( int i = 0 ; i < imColIndex . length ; i ++ ) { for ( int j = 0 ; j < imColIndex [ i ] . length ; j ++ ) { if ( colIndex == imColIndex [ i ] [ j ] ) { return i ; } // end of if (col == imColIndex[i][j]) } // end of for (int j=0; j<imColIndex[i].length; j++) } // end of for (int i=0; i<imColIndex.length; i++) return - 1 ; }
answer - 1 if the column is not part of any constraint
138
12
154,077
private void showAktRow ( ) { try { pStmt . clearParameters ( ) ; for ( int i = 0 ; i < primaryKeys . length ; i ++ ) { pStmt . setObject ( i + 1 , resultRowPKs [ aktRowNr ] [ i ] ) ; } // end of for (int i=0; i<primaryKeys.length; i++) ResultSet rs = pStmt . executeQuery ( ) ; rs . next ( ) ; for ( int i = 0 ; i < columns . length ; i ++ ) { komponente [ i ] . setContent ( rs . getString ( i + 1 ) ) ; } // end of for (int i=0; i<primaryKeys.length; i++) rs . close ( ) ; } catch ( SQLException e ) { ZaurusEditor . printStatus ( "SQL Exception: " + e . getMessage ( ) ) ; } // end of try-catch for ( int i = 0 ; i < columns . length ; i ++ ) { komponente [ i ] . clearChanges ( ) ; } }
get and show the values of the actual row in the GUI
239
12
154,078
private void voltConvertBinaryLiteralOperandsToBigint ( ) { // Strange that CONCAT is an arithmetic operator. // You could imagine using it for VARBINARY, so // definitely don't convert its operands to BIGINT! assert ( opType != OpTypes . CONCAT ) ; for ( int i = 0 ; i < nodes . length ; ++ i ) { Expression e = nodes [ i ] ; ExpressionValue . voltMutateToBigintType ( e , this , i ) ; } }
A VoltDB extension to use X .. as a numeric value
110
12
154,079
public int findColumn ( String tableName , String columnName ) { // The namedJoinColumnExpressions are ExpressionColumn objects // for columns named in USING conditions. Each range variable // has a possibly empty list of these. If two range variables are // operands of a join with a USING condition, both get the same list // of USING columns. In our semantics the query // select T2.C from T1 join T2 using(C); // selects T2.C. This is not standard behavior, but it seems to // be common to mysql and postgresql. The query // select C from T1 join T2 using(C); // selects the C from T1 or T2, since the using clause says // they will have the same value. In the query // select C from T1 join T2 using(C), T3; // where T3 has a column named C, there is an ambiguity, since // the first join tree (T1 join T2 using(C)) has a column named C and // T3 has another C column. In this case we need the T1.C notation. // The query // select T1.C from T1 join T2 using(C), T3; // will select the C from the first join tree, and // select T3.C from T1 join T2 using(C), T3; // will select the C from the second join tree, which is just T3. // If we don't have a table name and there are some USING columns, // then look into them. If the name is in the USING columns, it // is not in this range variable. The function getColumnExpression // will fetch this using variable in another search. if ( namedJoinColumnExpressions != null && tableName == null && namedJoinColumnExpressions . containsKey ( columnName ) ) { return - 1 ; } if ( variables != null ) { return variables . getIndex ( columnName ) ; } else if ( columnAliases != null ) { return columnAliases . getIndex ( columnName ) ; } else { return rangeTable . findColumn ( columnName ) ; } }
Returns the index for the column given the column s table name and column name . If the table name is null there is no table name specified . For example in a query select C from T there is no table name so tableName would be null . In the query select T . C from T tableName would be the string T . Don t return any column found in a USING join condition .
455
80
154,080
void addIndexCondition ( Expression [ ] exprList , Index index , int colCount , boolean isJoin ) { // VoltDB extension if ( rangeIndex == index && isJoinIndex && ( ! isJoin ) && ( multiColumnCount > 0 ) && ( colCount == 0 ) ) { // This is one particular set of conditions which broke the classification of // ON and WHERE clauses. return ; } // End of VoltDB extension rangeIndex = index ; isJoinIndex = isJoin ; for ( int i = 0 ; i < colCount ; i ++ ) { Expression e = exprList [ i ] ; indexEndCondition = ExpressionLogical . andExpressions ( indexEndCondition , e ) ; } if ( colCount == 1 ) { indexCondition = exprList [ 0 ] ; } else { findFirstExpressions = exprList ; isMultiFindFirst = true ; multiColumnCount = colCount ; } }
Only multiple EQUAL conditions are used
189
7
154,081
public HsqlName getSubqueryTableName ( ) { HsqlName hsqlName = new HsqlName ( this , SqlInvariants . SYSTEM_SUBQUERY , false , SchemaObject . TABLE ) ; hsqlName . schema = SqlInvariants . SYSTEM_SCHEMA_HSQLNAME ; return hsqlName ; }
Same name string but different objects and serial number
76
9
154,082
static public HsqlName getAutoColumnName ( int i ) { if ( i < autoColumnNames . length ) { return autoColumnNames [ i ] ; } return new HsqlName ( staticManager , makeAutoColumnName ( "C_" , i ) , 0 , false ) ; }
Column index i is 0 based returns 1 based numbered column .
62
12
154,083
public String getString ( String key ) { String value = wrappedBundle . getString ( key ) ; if ( value . length ( ) < 1 ) { value = getStringFromFile ( key ) ; // For conciseness and sanity, get rid of all \r's so that \n // will definitively be our line breaks. if ( value . indexOf ( ' ' ) > - 1 ) value = value . replaceAll ( "\\Q\r\n" , "\n" ) . replaceAll ( "\\Q\r" , "\n" ) ; if ( value . length ( ) > 0 && value . charAt ( value . length ( ) - 1 ) == ' ' ) value = value . substring ( 0 , value . length ( ) - 1 ) ; } return RefCapablePropertyResourceBundle . toNativeLs ( value ) ; }
Returns value defined in this RefCapablePropertyResourceBundle s . properties file unless that value is empty . If the value in the . properties file is empty then this returns the entire contents of the referenced text file .
184
44
154,084
static private RefCapablePropertyResourceBundle getRef ( String baseName , ResourceBundle rb , ClassLoader loader ) { if ( ! ( rb instanceof PropertyResourceBundle ) ) throw new MissingResourceException ( "Found a Resource Bundle, but it is a " + rb . getClass ( ) . getName ( ) , PropertyResourceBundle . class . getName ( ) , null ) ; if ( allBundles . containsKey ( rb ) ) return ( RefCapablePropertyResourceBundle ) allBundles . get ( rb ) ; RefCapablePropertyResourceBundle newPRAFP = new RefCapablePropertyResourceBundle ( baseName , ( PropertyResourceBundle ) rb , loader ) ; allBundles . put ( rb , newPRAFP ) ; return newPRAFP ; }
Return a ref to a new or existing RefCapablePropertyResourceBundle or throw a MissingResourceException .
181
22
154,085
private static boolean checkPureColumnIndex ( Index index , int aggCol , List < AbstractExpression > filterExprs ) { boolean found = false ; // all left child of filterExprs must be of type TupleValueExpression in equality comparison for ( AbstractExpression expr : filterExprs ) { if ( expr . getExpressionType ( ) != ExpressionType . COMPARE_EQUAL ) { return false ; } if ( ! ( expr . getLeft ( ) instanceof TupleValueExpression ) ) { return false ; } if ( ( ( TupleValueExpression ) expr . getLeft ( ) ) . getColumnIndex ( ) == aggCol ) { found = true ; } } if ( found ) { return true ; } if ( index . getColumns ( ) . size ( ) > filterExprs . size ( ) ) { List < ColumnRef > indexedColRefs = CatalogUtil . getSortedCatalogItems ( index . getColumns ( ) , "index" ) ; if ( indexedColRefs . get ( filterExprs . size ( ) ) . getColumn ( ) . getIndex ( ) == aggCol ) { return true ; } } return false ; }
or all filters compose the complete set of prefix key components
258
11
154,086
public static Runnable writeHashinatorConfig ( InstanceId instId , String path , String nonce , int hostId , HashinatorSnapshotData hashData , boolean isTruncationSnapshot ) throws IOException { final File file = new VoltFile ( path , constructHashinatorConfigFilenameForNonce ( nonce , hostId ) ) ; if ( file . exists ( ) ) { if ( ! file . delete ( ) ) { if ( isTruncationSnapshot ) { VoltDB . crashLocalVoltDB ( "Unexpected exception while attempting to delete old hash file for truncation snapshot" ) ; } throw new IOException ( "Unable to replace existing hashinator config " + file ) ; } } boolean success = false ; try { final FileOutputStream fos = new FileOutputStream ( file ) ; ByteBuffer fileBuffer = hashData . saveToBuffer ( instId ) ; fos . getChannel ( ) . write ( fileBuffer ) ; success = true ; return new Runnable ( ) { @ Override public void run ( ) { try { fos . getChannel ( ) . force ( true ) ; } catch ( IOException e ) { if ( isTruncationSnapshot ) { VoltDB . crashLocalVoltDB ( "Unexpected exception while attempting to create hash file for truncation snapshot" , true , e ) ; } throw new RuntimeException ( e ) ; } finally { try { fos . close ( ) ; } catch ( IOException e ) { if ( isTruncationSnapshot ) { VoltDB . crashLocalVoltDB ( "Unexpected exception while attempting to create hash file for truncation snapshot" , true , e ) ; } throw new RuntimeException ( e ) ; } } } } ; } finally { if ( ! success ) { file . delete ( ) ; } } }
Write the hashinator config file for a snapshot
389
9
154,087
public static String parseNonceFromDigestFilename ( String filename ) { if ( filename == null || ! filename . endsWith ( ".digest" ) ) { throw new IllegalArgumentException ( "Bad digest filename: " + filename ) ; } return parseNonceFromSnapshotFilename ( filename ) ; }
Get the nonce from the filename of the digest file .
65
12
154,088
public static String parseNonceFromHashinatorConfigFilename ( String filename ) { if ( filename == null || ! filename . endsWith ( HASH_EXTENSION ) ) { throw new IllegalArgumentException ( "Bad hashinator config filename: " + filename ) ; } return parseNonceFromSnapshotFilename ( filename ) ; }
Get the nonce from the filename of the hashinator config file .
70
14
154,089
public static String parseNonceFromSnapshotFilename ( String filename ) { if ( filename == null ) { throw new IllegalArgumentException ( "Bad snapshot filename: " + filename ) ; } // For the snapshot catalog if ( filename . endsWith ( ".jar" ) ) { return filename . substring ( 0 , filename . indexOf ( ".jar" ) ) ; } // for everything else valid in new format or volt1.2 or earlier table files else if ( filename . indexOf ( "-" ) > 0 ) { return filename . substring ( 0 , filename . indexOf ( "-" ) ) ; } // volt 1.2 and earlier digest filename else if ( filename . endsWith ( ".digest" ) ) { return filename . substring ( 0 , filename . indexOf ( ".digest" ) ) ; } // Hashinator config filename. else if ( filename . endsWith ( HASH_EXTENSION ) ) { return filename . substring ( 0 , filename . indexOf ( HASH_EXTENSION ) ) ; } throw new IllegalArgumentException ( "Bad snapshot filename: " + filename ) ; }
Get the nonce from any snapshot - related file .
238
11
154,090
public static List < ByteBuffer > retrieveHashinatorConfigs ( String path , String nonce , int maxConfigs , VoltLogger logger ) throws IOException { VoltFile directory = new VoltFile ( path ) ; ArrayList < ByteBuffer > configs = new ArrayList < ByteBuffer > ( ) ; if ( directory . listFiles ( ) == null ) { return configs ; } for ( File file : directory . listFiles ( ) ) { if ( file . getName ( ) . startsWith ( nonce + "-host_" ) && file . getName ( ) . endsWith ( HASH_EXTENSION ) ) { byte [ ] rawData = new byte [ ( int ) file . length ( ) ] ; FileInputStream fis = null ; DataInputStream dis = null ; try { fis = new FileInputStream ( file ) ; dis = new DataInputStream ( fis ) ; dis . readFully ( rawData ) ; configs . add ( ByteBuffer . wrap ( rawData ) ) ; } finally { if ( dis != null ) { dis . close ( ) ; } if ( fis != null ) { fis . close ( ) ; } } } } return configs ; }
Read hashinator snapshots into byte buffers .
258
8
154,091
public static Runnable writeSnapshotCatalog ( String path , String nonce , boolean isTruncationSnapshot ) throws IOException { String filename = SnapshotUtil . constructCatalogFilenameForNonce ( nonce ) ; try { return VoltDB . instance ( ) . getCatalogContext ( ) . writeCatalogJarToFile ( path , filename , CatalogJarWriteMode . RECOVER ) ; } catch ( IOException ioe ) { if ( isTruncationSnapshot ) { VoltDB . crashLocalVoltDB ( "Unexpected exception while attempting to create Catalog file for truncation snapshot" , true , ioe ) ; } throw new IOException ( "Unable to write snapshot catalog to file: " + path + File . separator + filename , ioe ) ; } }
Write the current catalog associated with the database snapshot to the snapshot location
166
13
154,092
public static Runnable writeTerminusMarker ( final String nonce , final NodeSettings paths , final VoltLogger logger ) { final File f = new File ( paths . getVoltDBRoot ( ) , VoltDB . TERMINUS_MARKER ) ; return new Runnable ( ) { @ Override public void run ( ) { try ( PrintWriter pw = new PrintWriter ( new FileWriter ( f ) , true ) ) { pw . println ( nonce ) ; } catch ( IOException e ) { throw new RuntimeException ( "Failed to create .complete file for " + f . getName ( ) , e ) ; } } } ; }
Write the shutdown save snapshot terminus marker
144
8
154,093
public static void retrieveSnapshotFiles ( File directory , Map < String , Snapshot > namedSnapshotMap , FileFilter filter , boolean validate , SnapshotPathType stype , VoltLogger logger ) { NamedSnapshots namedSnapshots = new NamedSnapshots ( namedSnapshotMap , stype ) ; retrieveSnapshotFilesInternal ( directory , namedSnapshots , filter , validate , stype , logger , 0 ) ; }
Spider the provided directory applying the provided FileFilter . Optionally validate snapshot files . Return a summary of partition counts partition information files digests etc . that can be used to determine if a valid restore plan exists .
89
42
154,094
public static final String constructFilenameForTable ( Table table , String fileNonce , SnapshotFormat format , int hostId ) { String extension = ".vpt" ; if ( format == SnapshotFormat . CSV ) { extension = ".csv" ; } StringBuilder filename_builder = new StringBuilder ( fileNonce ) ; filename_builder . append ( "-" ) ; filename_builder . append ( table . getTypeName ( ) ) ; if ( ! table . getIsreplicated ( ) ) { filename_builder . append ( "-host_" ) ; filename_builder . append ( hostId ) ; } filename_builder . append ( extension ) ; //Volt partitioned table return filename_builder . toString ( ) ; }
Generates a Filename to the snapshot file for the given table .
155
14
154,095
/**
 * Requests a new snapshot on a background thread, retrying while a snapshot is
 * already in progress. Gives up after 2 hours. Exactly one response — success,
 * failure, or null on timeout — is delivered to {@code handler}.
 *
 * @param clientHandle  handle used to correlate the internal adapter callback
 * @param blocking      whether the snapshot request is a blocking one
 * @param data          extra JSON request payload forwarded to the daemon
 * @param handler       receives the final ClientResponse (may be null on timeout)
 * @param notifyChanges when true, queued-state updates are watched via ZK instead of polling
 */
public static void requestSnapshot ( final long clientHandle , final String path , final String nonce , final boolean blocking , final SnapshotFormat format , final SnapshotPathType stype , final String data , final SnapshotResponseHandler handler , final boolean notifyChanges ) {
    final SnapshotInitiationInfo snapInfo = new SnapshotInitiationInfo ( path , nonce , blocking , format , stype , data ) ;
    // Internal adapter stands in for a real client connection; responses are funneled
    // into a queue the worker thread polls.
    final SimpleClientResponseAdapter adapter = new SimpleClientResponseAdapter ( ClientInterface . SNAPSHOT_UTIL_CID , "SnapshotUtilAdapter" , true ) ;
    final LinkedBlockingQueue < ClientResponse > responses = new LinkedBlockingQueue < ClientResponse > ( ) ;
    adapter . registerCallback ( clientHandle , new SimpleClientResponseAdapter . Callback ( ) {
        @ Override public void handleResponse ( ClientResponse response ) {
            responses . offer ( response ) ;
        }
    } ) ;
    final SnapshotDaemon sd = VoltDB . instance ( ) . getClientInterface ( ) . getSnapshotDaemon ( ) ;
    Runnable work = new Runnable ( ) {
        @ Override public void run ( ) {
            ClientResponse response = null ;
            // abort if unable to succeed in 2 hours
            final long startTime = System . currentTimeMillis ( ) ;
            boolean hasRequested = false ;
            while ( System . currentTimeMillis ( ) - startTime <= TimeUnit . HOURS . toMillis ( 2 ) ) {
                try {
                    if ( ! hasRequested ) {
                        // (Re)create the ZK request node; reset below when we need to retry.
                        sd . createAndWatchRequestNode ( clientHandle , adapter , snapInfo , notifyChanges ) ;
                        hasRequested = true ;
                    }
                    try {
                        // Poll only for the time remaining in the overall 2h budget.
                        response = responses . poll (
                                TimeUnit . HOURS . toMillis ( 2 ) - ( System . currentTimeMillis ( ) - startTime ) ,
                                TimeUnit . MILLISECONDS ) ;
                        if ( response == null ) {
                            // Timed out: fall through and report null to the handler.
                            break ;
                        }
                    } catch ( InterruptedException e ) {
                        VoltDB . crashLocalVoltDB ( "Should never happen" , true , e ) ;
                    }
                    VoltTable [ ] results = response . getResults ( ) ;
                    if ( response . getStatus ( ) != ClientResponse . SUCCESS ) {
                        break ;
                    } else if ( isSnapshotInProgress ( results ) ) {
                        // retry after a second
                        Thread . sleep ( 1000 ) ;
                        // Request again
                        hasRequested = false ;
                        continue ;
                    } else if ( isSnapshotQueued ( results ) && notifyChanges ) {
                        //Wait for an update on the queued state via ZK
                        continue ;
                    } else {
                        // other errors are not recoverable
                        break ;
                    }
                } catch ( ForwardClientException e ) {
                    //This happens when something goes wrong in the snapshot daemon
                    //I think it will always be that there was an existing snapshot request
                    //It should eventually terminate and then we can submit one.
                    try {
                        Thread . sleep ( 5000 ) ;
                    } catch ( InterruptedException e1 ) { }
                    new VoltLogger ( "SNAPSHOT" ) . warn ( "Partition detection is unable to submit a snapshot request " + "because one already exists. Retrying." ) ;
                    continue ;
                } catch ( InterruptedException ignore ) { }
                // NOTE(review): interrupts above are deliberately swallowed without
                // re-interrupting; restoring the interrupt flag would make the next
                // poll() throw and crash the node via the "Should never happen" path.
            }
            handler . handleResponse ( response ) ;
        }
    } ;
    // Use an executor service here to avoid explosion of threads???
    ThreadFactory factory = CoreUtils . getThreadFactory ( "Snapshot Request - " + nonce ) ;
    Thread workThread = factory . newThread ( work ) ;
    workThread . start ( ) ;
}
Request a new snapshot . It will retry for a couple of times . If it doesn't succeed in the specified time an error response will be sent to the response handler otherwise a success response will be passed to the handler .
727
45
154,096
/**
 * Returns a future that completes when the snapshot identified by {@code nonce}
 * finishes successfully. Registers a completion interest with the global monitor
 * and deregisters it once the matching successful event arrives; failed events
 * for the nonce are ignored and the interest stays registered.
 *
 * @param nonce snapshot nonce to watch for
 * @return future settled with the successful completion event
 */
public static ListenableFuture < SnapshotCompletionInterest . SnapshotCompletionEvent > watchSnapshot ( final String nonce ) {
    final SettableFuture < SnapshotCompletionInterest . SnapshotCompletionEvent > completion = SettableFuture . create ( ) ;
    SnapshotCompletionInterest watcher = new SnapshotCompletionInterest ( ) {
        @ Override
        public CountDownLatch snapshotCompleted ( SnapshotCompletionEvent event ) {
            // Guard: only a *successful* completion of *our* nonce settles the future.
            if ( ! event . nonce . equals ( nonce ) || ! event . didSucceed ) {
                return null ;
            }
            VoltDB . instance ( ) . getSnapshotCompletionMonitor ( ) . removeInterest ( this ) ;
            completion . set ( event ) ;
            return null ;
        }
    } ;
    VoltDB . instance ( ) . getSnapshotCompletionMonitor ( ) . addInterest ( watcher ) ;
    return completion ;
}
Watch for the completion of a snapshot
173
7
154,097
/**
 * Retrieves the hashinator config saved for this host from a snapshot directory
 * so it can be restored.
 *
 * @param path   snapshot directory to scan
 * @param nonce  snapshot nonce used to derive the expected file name
 * @param hostId host whose hashinator config file is wanted
 * @param logger unused here; kept for signature compatibility with callers
 * @return the restored hashinator snapshot data
 * @throws IOException if the directory holds no matching hashinator file
 */
public static HashinatorSnapshotData retrieveHashinatorConfig ( String path , String nonce , int hostId , VoltLogger logger ) throws IOException {
    final String expectedFileName = constructHashinatorConfigFilenameForNonce ( nonce , hostId ) ;
    final File [ ] candidates = new VoltFile ( path ) . listFiles ( ) ;
    // listFiles() returns null when the path is not a readable directory.
    if ( candidates != null ) {
        for ( File candidate : candidates ) {
            if ( expectedFileName . equals ( candidate . getName ( ) ) ) {
                HashinatorSnapshotData hashData = new HashinatorSnapshotData ( ) ;
                hashData . restoreFromFile ( candidate ) ;
                return hashData ;
            }
        }
    }
    throw new IOException ( "Missing hashinator data in snapshot" ) ;
}
Retrieve hashinator config for restore .
168
8
154,098
/**
 * Resolves the effective snapshot path for a path type: command-log and auto
 * snapshots use the server-configured locations; every other type (including a
 * null {@code stype}) falls through to the caller-supplied path.
 *
 * @param path fallback path returned when the type carries no configured location
 */
public static String getRealPath ( SnapshotPathType stype , String path ) {
    if ( stype == SnapshotPathType . SNAP_CL ) {
        return VoltDB . instance ( ) . getCommandLogSnapshotPath ( ) ;
    }
    if ( stype == SnapshotPathType . SNAP_AUTO ) {
        return VoltDB . instance ( ) . getSnapshotPath ( ) ;
    }
    return path ;
}
Return path based on type if type is not CL or AUTO return provided path .
88
17
154,099
/**
 * Returns this pooled connection to a clean state instead of physically closing it:
 * rolls back any open transaction, clears warnings, reapplies the pool's default
 * settings, resets the server-side connection state, and fires the close event so
 * the pool reclaims the connection. On failure the SQLException is broadcast to
 * listeners and rethrown; the statement order here is the cleanup contract.
 *
 * @throws SQLException if any step of the cleanup fails
 */
public void close ( ) throws SQLException {
    // NOTE(review): validate() presumably rejects an already-closed handle — confirm.
    validate ( ) ;
    try {
        this . connection . rollback ( ) ;
        this . connection . clearWarnings ( ) ;
        // Restore pool-configured defaults (autocommit, isolation, etc.) before reuse.
        this . connectionDefaults . setDefaults ( this . connection ) ;
        this . connection . reset ( ) ;
        // Tell listeners (the pool) the logical connection is closed and reusable.
        fireCloseEvent ( ) ;
    } catch ( SQLException e ) {
        fireSqlExceptionEvent ( e ) ;
        throw e ;
    }
}
Rolls the connection back resets the connection back to defaults clears warnings resets the connection on the server side and returns the connection to the pool .
90
30