idx
int64
0
165k
question
stringlengths
73
4.15k
target
stringlengths
5
918
len_question
int64
21
890
len_target
int64
3
255
26,700
/**
 * Requests that the current input image be saved. For still images the save
 * happens immediately on a worker thread; for streams it happens now if the
 * stream is paused, otherwise when the next image is processed.
 */
public void requestSaveInputImage() {
    saveRequested = false;
    switch (inputMethod) {
        case IMAGE:
            // still image: save right away on a background thread
            new Thread(this::saveInputImage).start();
            break;

        case VIDEO:
        case WEBCAM:
            // stream: save now only while paused, otherwise defer
            if (streamPaused) {
                saveInputImage();
            } else {
                saveRequested = true;
            }
            break;
    }
}
Makes a request that the input image be saved . This request might be carried out immediately or when the next image is processed .
79
26
26,701
@ Override public boolean generate ( List < AssociatedPair > dataSet , Se3_F64 model ) { if ( ! computeEssential . process ( dataSet , E ) ) return false ; // extract the possible motions decomposeE . decompose ( E ) ; selectBest . select ( decomposeE . getSolutions ( ) , dataSet , model ) ; return true ; }
Computes the camera motion from the set of observations . The motion is from the first into the second camera frame .
82
23
26,702
public void setCameraParameters ( float fx , float fy , float cx , float cy , int width , int height ) { this . fx = fx ; this . fy = fy ; this . cx = cx ; this . cy = cy ; derivX . reshape ( width , height ) ; derivY . reshape ( width , height ) ; // set these to the maximum possible size int N = width * height * imageType . getNumBands ( ) ; A . reshape ( N , 6 ) ; y . reshape ( N , 1 ) ; }
Specifies intrinsic camera parameters . Must be called .
123
10
26,703
/**
 * Changes the interpolation method used for the input image and its
 * derivatives. In most situations the default should be kept.
 */
public void setInterpolation( double inputMin , double inputMax ,
                              double derivMin , double derivMax ,
                              InterpolationType type ) {
    interpI = FactoryInterpolation.createPixelS(
            inputMin, inputMax, type, BorderType.EXTENDED, imageType.getImageClass());
    interpDX = FactoryInterpolation.createPixelS(
            derivMin, derivMax, type, BorderType.EXTENDED, derivType.getImageClass());
    interpDY = FactoryInterpolation.createPixelS(
            derivMin, derivMax, type, BorderType.EXTENDED, derivType.getImageClass());
}
Used to change the interpolation method . You probably don't want to do this .
143
15
26,704
void setKeyFrame ( Planar < I > input , ImagePixelTo3D pixelTo3D ) { InputSanityCheck . checkSameShape ( derivX , input ) ; wrapI . wrap ( input ) ; keypixels . reset ( ) ; for ( int y = 0 ; y < input . height ; y ++ ) { for ( int x = 0 ; x < input . width ; x ++ ) { // See if there's a valid 3D point at this location if ( ! pixelTo3D . process ( x , y ) ) { continue ; } float P_x = ( float ) pixelTo3D . getX ( ) ; float P_y = ( float ) pixelTo3D . getY ( ) ; float P_z = ( float ) pixelTo3D . getZ ( ) ; float P_w = ( float ) pixelTo3D . getW ( ) ; // skip point if it's at infinity or has a negative value if ( P_w <= 0 ) continue ; // save the results Pixel p = keypixels . grow ( ) ; p . valid = true ; wrapI . get ( x , y , p . bands ) ; p . x = x ; p . y = y ; p . p3 . set ( P_x / P_w , P_y / P_w , P_z / P_w ) ; } } }
Sets the keyframe . This is the image which motion is estimated relative to . The 3D location of points in the keyframe must be known .
297
31
26,705
/**
 * Computes the spatial diversity of valid keyframe pixels after
 * transforming them into the current frame.
 *
 * @return the spread of the transformed points
 */
public double computeFeatureDiversity( Se3_F32 keyToCurrent ) {
    diversity.reset();
    for (int idx = 0; idx < keypixels.size(); idx++) {
        Pixel pixel = keypixels.data[idx];
        if (pixel.valid) {
            SePointOps_F32.transform(keyToCurrent, pixel.p3, S);
            diversity.addPoint(S.x, S.y, S.z);
        }
    }
    diversity.process();
    return diversity.getSpread();
}
Computes the diversity of valid pixels in keyframe to the location in the current frame .
120
18
26,706
public boolean estimateMotion ( Planar < I > input , Se3_F32 hintKeyToInput ) { InputSanityCheck . checkSameShape ( derivX , input ) ; initMotion ( input ) ; keyToCurrent . set ( hintKeyToInput ) ; boolean foundSolution = false ; float previousError = Float . MAX_VALUE ; for ( int i = 0 ; i < maxIterations ; i ++ ) { constructLinearSystem ( input , keyToCurrent ) ; if ( ! solveSystem ( ) ) break ; if ( Math . abs ( previousError - errorOptical ) / previousError < convergeTol ) break ; else { // update the estimated motion from the computed twist previousError = errorOptical ; keyToCurrent . concat ( motionTwist , tmp ) ; keyToCurrent . set ( tmp ) ; foundSolution = true ; } } return foundSolution ; }
Estimates the motion relative to the key frame .
187
10
26,707
void initMotion ( Planar < I > input ) { if ( solver == null ) { solver = LinearSolverFactory_DDRM . qr ( input . width * input . height * input . getNumBands ( ) , 6 ) ; } // compute image derivative and setup interpolation functions computeD . process ( input , derivX , derivY ) ; }
Initialize motion related data structures
80
6
26,708
/**
 * Computes candidate essential matrices from exactly 5 point correspondences.
 * Pipeline: span of the null space -> linear system from the 10 constraint
 * equations (solved via LU) -> z-polynomial -> real roots -> one E per root.
 * NOTE(review): the equation numbers in the inline comments suggest this is a
 * five-point minimal solver (Nister-style) -- confirm against the paper.
 *
 * @param points exactly 5 associated pairs; otherwise IllegalArgumentException
 * @param solutions output queue of candidate essential matrices (reset first)
 * @return false if polynomial root finding fails, true otherwise
 */
public boolean process ( List < AssociatedPair > points , FastQueue < DMatrixRMaj > solutions ) { if ( points . size ( ) != 5 ) throw new IllegalArgumentException ( "Exactly 5 points are required, not " + points . size ( ) ) ; solutions . reset ( ) ; // Computes the 4-vector span which contains E. See equations 7-9 computeSpan ( points ) ; // Construct a linear system based on the 10 constraint equations. See equations 5,6, and 10 . helper . setNullSpace ( X , Y , Z , W ) ; helper . setupA1 ( A1 ) ; helper . setupA2 ( A2 ) ; // instead of Gauss-Jordan elimination LU decomposition is used to solve the system solver . setA ( A1 ) ; solver . solve ( A2 , C ) ; // construct the z-polynomial matrix. Equations 11-14 helper . setDeterminantVectors ( C ) ; helper . extractPolynomial ( poly . getCoefficients ( ) ) ; if ( ! findRoots . process ( poly ) ) return false ; for ( Complex_F64 c : findRoots . getRoots ( ) ) { if ( ! c . isReal ( ) ) continue ; solveForXandY ( c . real ) ; DMatrixRMaj E = solutions . grow ( ) ; for ( int i = 0 ; i < 9 ; i ++ ) { E . data [ i ] = x * X [ i ] + y * Y [ i ] + z * Z [ i ] + W [ i ] ; } } return true ; }
Computes the essential matrix from point correspondences .
352
10
26,709
/**
 * Once z is known, solves for x and y. Builds a 3x2 system (tmpA) and RHS
 * (tmpY) by evaluating the K/L/M polynomial coefficient tables at z via
 * Horner's rule, negates the RHS, and solves. Results stored in this.x/this.y.
 */
private void solveForXandY ( double z ) { this . z = z ; // solve for x and y using the first two rows of B tmpA . data [ 0 ] = ( ( helper . K00 * z + helper . K01 ) * z + helper . K02 ) * z + helper . K03 ; tmpA . data [ 1 ] = ( ( helper . K04 * z + helper . K05 ) * z + helper . K06 ) * z + helper . K07 ; tmpY . data [ 0 ] = ( ( ( helper . K08 * z + helper . K09 ) * z + helper . K10 ) * z + helper . K11 ) * z + helper . K12 ; tmpA . data [ 2 ] = ( ( helper . L00 * z + helper . L01 ) * z + helper . L02 ) * z + helper . L03 ; tmpA . data [ 3 ] = ( ( helper . L04 * z + helper . L05 ) * z + helper . L06 ) * z + helper . L07 ; tmpY . data [ 1 ] = ( ( ( helper . L08 * z + helper . L09 ) * z + helper . L10 ) * z + helper . L11 ) * z + helper . L12 ; tmpA . data [ 4 ] = ( ( helper . M00 * z + helper . M01 ) * z + helper . M02 ) * z + helper . M03 ; tmpA . data [ 5 ] = ( ( helper . M04 * z + helper . M05 ) * z + helper . M06 ) * z + helper . M07 ; tmpY . data [ 2 ] = ( ( ( helper . M08 * z + helper . M09 ) * z + helper . M10 ) * z + helper . M11 ) * z + helper . M12 ; CommonOps_DDRM . scale ( - 1 , tmpY ) ; CommonOps_DDRM . solve ( tmpA , tmpY , tmpX ) ; this . x = tmpX . get ( 0 , 0 ) ; this . y = tmpX . get ( 1 , 0 ) ; }
Once z is known then x and y can be solved for using the B matrix
464
16
26,710
/**
 * Checks that observations from the left and right cameras are consistent.
 * Inputs are in original (unrectified) image pixel coordinates.
 */
public boolean checkPixel( Point2D_F64 left , Point2D_F64 right ) {
    // rectify both observations, then apply the rectified-space test
    leftImageToRect.compute(left.x, left.y, rectLeft);
    rightImageToRect.compute(right.x, right.y, rectRight);
    return checkRectified(rectLeft, rectRight);
}
Checks to see if the observations from the left and right camera are consistent . Observations are assumed to be in the original image pixel coordinates .
73
29
26,711
public boolean checkRectified ( Point2D_F64 left , Point2D_F64 right ) { // rectifications should make them appear along the same y-coordinate/epipolar line if ( Math . abs ( left . y - right . y ) > toleranceY ) return false ; // features in the right camera should appear left of features in the image image return right . x <= left . x + toleranceX ; }
Checks to see if the observations from the left and right camera are consistent . Observations are assumed to be in the rectified image pixel coordinates .
90
30
26,712
/**
 * Creates a scale/orientation BRIEF descriptor using bilinear interpolation.
 */
public static <T extends ImageGray<T>> DescribePointBriefSO<T> briefso( BinaryCompareDefinition_I32 definition ,
                                                                        BlurFilter<T> filterBlur ) {
    Class<T> type = filterBlur.getInputType().getImageClass();
    return new DescribePointBriefSO<>(definition, filterBlur,
            FactoryInterpolation.bilinearPixelS(type, BorderType.EXTENDED));
}
TODO : remove filterBlur for all BRIEF variants ; change to radius , sigma , and type parameters .
119
17
26,713
/**
 * Performs the variance test over the specified rectangle.
 *
 * @return true if the region's variance is at least the lower threshold
 */
public boolean checkVariance( ImageRectangle r ) {
    return computeVariance(r.x0, r.y0, r.x1, r.y1) >= thresholdLower;
}
Performs variance test at the specified rectangle
51
8
26,714
protected double computeVariance ( int x0 , int y0 , int x1 , int y1 ) { // can use unsafe operations here since x0 > 0 and y0 > 0 double square = GIntegralImageOps . block_unsafe ( integralSq , x0 - 1 , y0 - 1 , x1 - 1 , y1 - 1 ) ; double area = ( x1 - x0 ) * ( y1 - y0 ) ; double mean = GIntegralImageOps . block_unsafe ( integral , x0 - 1 , y0 - 1 , x1 - 1 , y1 - 1 ) / area ; return square / area - mean * mean ; }
Computes the variance inside the specified rectangle . x0 and y0 must be > 0 .
146
21
26,715
protected double computeVarianceSafe ( int x0 , int y0 , int x1 , int y1 ) { // can use unsafe operations here since x0 > 0 and y0 > 0 double square = GIntegralImageOps . block_zero ( integralSq , x0 - 1 , y0 - 1 , x1 - 1 , y1 - 1 ) ; double area = ( x1 - x0 ) * ( y1 - y0 ) ; double mean = GIntegralImageOps . block_zero ( integral , x0 - 1 , y0 - 1 , x1 - 1 , y1 - 1 ) / area ; return square / area - mean * mean ; }
Computes the variance inside the specified rectangle .
145
9
26,716
/**
 * Integral image of squared pixel values, integer version. The first row is
 * a running sum of value^2; every later row keeps a per-row running sum and
 * adds the already-computed cell directly above. startIndex/stride are
 * honored, so sub-images are handled.
 *
 * @param input 8-bit input image (values masked with 0xFF)
 * @param transformed 64-bit output; assumed same width/height as input
 */
public static void transformSq ( final GrayU8 input , final GrayS64 transformed ) { int indexSrc = input . startIndex ; int indexDst = transformed . startIndex ; int end = indexSrc + input . width ; long total = 0 ; for ( ; indexSrc < end ; indexSrc ++ ) { int value = input . data [ indexSrc ] & 0xFF ; transformed . data [ indexDst ++ ] = total += value * value ; } for ( int y = 1 ; y < input . height ; y ++ ) { indexSrc = input . startIndex + input . stride * y ; indexDst = transformed . startIndex + transformed . stride * y ; int indexPrev = indexDst - transformed . stride ; end = indexSrc + input . width ; total = 0 ; for ( ; indexSrc < end ; indexSrc ++ ) { int value = input . data [ indexSrc ] & 0xFF ; total += value * value ; transformed . data [ indexDst ++ ] = transformed . data [ indexPrev ++ ] + total ; } } }
Integral image of pixel value squared . integer
238
9
26,717
/**
 * Integral image of squared pixel values, floating-point version. Same
 * rolling-sum scheme as the integer variant: first row is a running sum of
 * value^2, later rows add the cell above. Accumulates in double to limit
 * round-off.
 *
 * @param input 32-bit float input image
 * @param transformed 64-bit float output; assumed same width/height as input
 */
public static void transformSq ( final GrayF32 input , final GrayF64 transformed ) { int indexSrc = input . startIndex ; int indexDst = transformed . startIndex ; int end = indexSrc + input . width ; double total = 0 ; for ( ; indexSrc < end ; indexSrc ++ ) { float value = input . data [ indexSrc ] ; transformed . data [ indexDst ++ ] = total += value * value ; } for ( int y = 1 ; y < input . height ; y ++ ) { indexSrc = input . startIndex + input . stride * y ; indexDst = transformed . startIndex + transformed . stride * y ; int indexPrev = indexDst - transformed . stride ; end = indexSrc + input . width ; total = 0 ; for ( ; indexSrc < end ; indexSrc ++ ) { float value = input . data [ indexSrc ] ; total += value * value ; transformed . data [ indexDst ++ ] = transformed . data [ indexPrev ++ ] + total ; } } }
Integral image of pixel value squared . floating point
230
10
26,718
/**
 * Adds one view's two cross-product equations to the A matrix, normalized by
 * the pixel standard deviations (stats.stdX/stdY). The commented-out cx/cy
 * terms cancel per the derivation in the inline comments.
 *
 * @param P 3x4 camera matrix, read row-major from P.data
 * @param a observed pixel in this view
 * @param index write position into A.data
 * @return index advanced past the 8 written entries
 */
private int addView ( DMatrixRMaj P , Point2D_F64 a , int index ) { final double sx = stats . stdX , sy = stats . stdY ; // final double cx = stats.meanX, cy = stats.meanY; // Easier to read the code when P is broken up this way double r11 = P . data [ 0 ] , r12 = P . data [ 1 ] , r13 = P . data [ 2 ] , r14 = P . data [ 3 ] ; double r21 = P . data [ 4 ] , r22 = P . data [ 5 ] , r23 = P . data [ 6 ] , r24 = P . data [ 7 ] ; double r31 = P . data [ 8 ] , r32 = P . data [ 9 ] , r33 = P . data [ 10 ] , r34 = P . data [ 11 ] ; // These rows are derived by applying the scaling matrix to pixels and camera matrix // px = (a.x/sx - cx/sx) // A[0,0] = a.x*r31 - r11 (before normalization) // A[0,0] = px*r31 - (r11-cx*r31)/sx (after normalization) // first row A . data [ index ++ ] = ( a . x * r31 - r11 ) / sx ; A . data [ index ++ ] = ( a . x * r32 - r12 ) / sx ; A . data [ index ++ ] = ( a . x * r33 - r13 ) / sx ; A . data [ index ++ ] = ( a . x * r34 - r14 ) / sx ; // second row A . data [ index ++ ] = ( a . y * r31 - r21 ) / sy ; A . data [ index ++ ] = ( a . y * r32 - r22 ) / sy ; A . data [ index ++ ] = ( a . y * r33 - r23 ) / sy ; A . data [ index ++ ] = ( a . y * r34 - r24 ) / sy ; return index ; }
Adds a view to the A matrix . Computed using cross product .
470
14
26,719
/**
 * Detects the object inside the image via a cascade: variance test -> fern
 * classifier (keeping only the most likely regions) -> template confidence
 * -> non-maximum suppression -> best/ambiguity selection. Results are
 * published through the success/ambiguous/best fields. Fern probability
 * tables are renormalized when their sums approach integer overflow.
 *
 * @param cascadeRegions candidate regions to run the cascade over
 */
protected void detectionCascade ( FastQueue < ImageRectangle > cascadeRegions ) { // initialize data structures success = false ; ambiguous = false ; best = null ; candidateDetections . reset ( ) ; localMaximums . reset ( ) ; ambiguousRegions . clear ( ) ; storageMetric . reset ( ) ; storageIndexes . reset ( ) ; storageRect . clear ( ) ; fernRegions . clear ( ) ; fernInfo . reset ( ) ; int totalP = 0 ; int totalN = 0 ; // Run through all candidate regions, ignore ones without enough variance, compute // the fern for each one TldRegionFernInfo info = fernInfo . grow ( ) ; for ( int i = 0 ; i < cascadeRegions . size ; i ++ ) { ImageRectangle region = cascadeRegions . get ( i ) ; if ( ! variance . checkVariance ( region ) ) { continue ; } info . r = region ; if ( fern . lookupFernPN ( info ) ) { totalP += info . sumP ; totalN += info . sumN ; info = fernInfo . grow ( ) ; } } fernInfo . removeTail ( ) ; // avoid overflow errors in the future by re-normalizing the Fern detector if ( totalP > 0x0fffffff ) fern . renormalizeP ( ) ; if ( totalN > 0x0fffffff ) fern . renormalizeN ( ) ; // Select the ferns with the highest likelihood selectBestRegionsFern ( totalP , totalN ) ; // From the remaining regions, score using the template algorithm computeTemplateConfidence ( ) ; if ( candidateDetections . size == 0 ) { return ; } // use non-maximum suppression to reduce the number of candidates nonmax . process ( candidateDetections , localMaximums ) ; best = selectBest ( ) ; if ( best != null ) { ambiguous = checkAmbiguous ( best ) ; success = true ; } }
Detects the object inside the image . Eliminates candidate regions using a cascade of tests
422
17
26,720
/**
 * Computes the template confidence for every region which passed the fern
 * test and adds regions above the upper threshold to candidateDetections.
 * The previous implementation tracked the maximum confidence in a local
 * variable that was never read; that dead code has been removed.
 */
protected void computeTemplateConfidence() {
    for (int i = 0; i < fernRegions.size(); i++) {
        ImageRectangle region = fernRegions.get(i);
        double confidence = template.computeConfidence(region);

        // keep only confident regions
        if (confidence < config.confidenceThresholdUpper)
            continue;

        TldRegion r = candidateDetections.grow();
        r.connections = 0;
        r.rect.set(region);
        r.confidence = confidence;
    }
}
Computes the confidence for all the regions which pass the fern test
124
14
26,721
protected void selectBestRegionsFern ( double totalP , double totalN ) { for ( int i = 0 ; i < fernInfo . size ; i ++ ) { TldRegionFernInfo info = fernInfo . get ( i ) ; double probP = info . sumP / totalP ; double probN = info . sumN / totalN ; // only consider regions with a higher P likelihood if ( probP > probN ) { // reward regions with a large difference between the P and N values storageMetric . add ( - ( probP - probN ) ) ; storageRect . add ( info . r ) ; } } // Select the N regions with the highest fern probability if ( config . maximumCascadeConsider < storageMetric . size ) { int N = Math . min ( config . maximumCascadeConsider , storageMetric . size ) ; storageIndexes . resize ( storageMetric . size ) ; QuickSelect . selectIndex ( storageMetric . data , N - 1 , storageMetric . size , storageIndexes . data ) ; for ( int i = 0 ; i < N ; i ++ ) { fernRegions . add ( storageRect . get ( storageIndexes . get ( i ) ) ) ; } } else { fernRegions . addAll ( storageRect ) ; } }
Computes the probability that each region is the target conditioned on this image . The sumP and sumN totals are needed for the image-conditional probability .
283
26
26,722
/**
 * Sets the current input pyramid and its spatial derivatives.
 *
 * @throws IllegalArgumentException if the layer counts do not match
 */
public void setImage( ImagePyramid<InputImage> image , DerivativeImage[] derivX , DerivativeImage[] derivY ) {
    boolean layersMatch = image.getNumLayers() == derivX.length
            && image.getNumLayers() == derivY.length;
    if (!layersMatch)
        throw new IllegalArgumentException("Number of layers does not match.");

    this.image = image;
    this.derivX = derivX;
    this.derivY = derivY;
}
Sets the current input images for the tracker to use .
103
12
26,723
/**
 * Sets only the image pyramid; the derivatives are cleared to null.
 * Only use this when tracking.
 */
public void setImage( ImagePyramid<InputImage> image ) {
    this.image = image;
    this.derivX = null;
    this.derivY = null;
}
Only sets the image pyramid . The derivatives are set to null . Only use this when tracking .
36
19
26,724
/**
 * Computes a transformation table which will equalize the provided
 * histogram, spreading the weight across the whole spectrum of values.
 * Often used to make dim images easier for people to see.
 *
 * Fixes over the previous version: an all-zero histogram no longer causes
 * a division by zero (the table is zero-filled instead), and the scaling
 * is done in 64-bit arithmetic so large pixel counts cannot overflow.
 *
 * @param histogram input histogram
 * @param transform output lookup table, same length as histogram
 */
public static void equalize( int histogram[] , int transform[] ) {
    // cumulative histogram
    int sum = 0;
    for (int i = 0; i < histogram.length; i++) {
        transform[i] = sum += histogram[i];
    }

    // degenerate case: an empty histogram would divide by zero below
    if (sum == 0) {
        for (int i = 0; i < histogram.length; i++) {
            transform[i] = 0;
        }
        return;
    }

    int maxValue = histogram.length - 1;
    for (int i = 0; i < histogram.length; i++) {
        // long math: cumulative*maxValue can exceed int range for big images
        transform[i] = (int)((long)transform[i]*maxValue/sum);
    }
}
Computes a transformation table which will equalize the provided histogram . An equalized histogram spreads the weight across the whole spectrum of values . Often used to make dim images easier for people to see .
103
41
26,725
/**
 * Applies a Laplacian-8 based sharpen filter to the image, with output
 * values clamped to [0, 255]. Uses the concurrent implementation when
 * enabled globally.
 */
public static void sharpen8( GrayU8 input , GrayU8 output ) {
    InputSanityCheck.checkSameShape(input, output);

    if (BoofConcurrency.USE_CONCURRENT) {
        ImplEnhanceFilter_MT.sharpenInner8(input, output, 0, 255);
        ImplEnhanceFilter_MT.sharpenBorder8(input, output, 0, 255);
    } else {
        ImplEnhanceFilter.sharpenInner8(input, output, 0, 255);
        ImplEnhanceFilter.sharpenBorder8(input, output, 0, 255);
    }
}
Applies a Laplacian - 8 based sharpen filter to the image .
133
17
26,726
/**
 * Updates the location (not the description) of each matched track and
 * rebuilds the active-track list from the matches.
 */
protected void updateTrackLocation( SetTrackInfo<Desc> info , FastQueue<AssociatedIndex> matches ) {
    // copy the matches into the set's own storage
    info.matches.resize(matches.size);
    for (int i = 0; i < matches.size; i++) {
        info.matches.get(i).set(matches.get(i));
    }

    tracksActive.clear();
    for (int i = 0; i < info.matches.size; i++) {
        AssociatedIndex match = info.matches.data[i];
        PointTrack track = info.tracks.get(match.src);
        Point2D_F64 location = info.locDst.data[match.dst];
        track.set(location.x, location.y);
        tracksActive.add(track);
    }
}
Update each track's location only and not its description . Update the active list too .
169
16
26,727
/**
 * Computes the hamming distance between two binary feature descriptors,
 * i.e. the number of bits which differ. Uses Integer.bitCount (a JVM
 * popcount intrinsic) instead of the hand-rolled bit-counting helper.
 *
 * @param a first descriptor; assumed same length as b
 * @param b second descriptor
 * @return number of differing bits
 */
public static int hamming( TupleDesc_B a , TupleDesc_B b ) {
    int score = 0;
    final int N = a.data.length;
    for (int i = 0; i < N; i++) {
        // popcount of the differing bits in this 32-bit word
        score += Integer.bitCount(a.data[i] ^ b.data[i]);
    }
    return score;
}
Computes the hamming distance between two binary feature descriptors
76
12
26,728
/**
 * Computes the horizontal image derivative using a central difference,
 * (I[x+1] - I[x-1]) * 0.5. The inner loop is manually unrolled by three,
 * rotating x0/x1/x2 as the previous-previous sample; a scalar tail loop
 * handles the remaining columns. Border columns (first/last) are not
 * written. Indexing assumes data is dense (index = width*y + x), so this
 * can only be used with images that are NOT sub-images.
 */
public static void derivX_F32 ( GrayF32 orig , GrayF32 derivX ) { final float [ ] data = orig . data ; final float [ ] imgX = derivX . data ; final int width = orig . getWidth ( ) ; final int height = orig . getHeight ( ) ; for ( int y = 0 ; y < height ; y ++ ) { int index = width * y + 1 ; int endX = index + width - 2 ; int endXAlt = endX - ( width - 2 ) % 3 ; float x0 = data [ index - 1 ] ; float x1 = data [ index ] ; for ( ; index < endXAlt ; ) { float x2 = data [ index + 1 ] ; imgX [ index ++ ] = ( x2 - x0 ) * 0.5f ; x0 = data [ index + 1 ] ; imgX [ index ++ ] = ( x0 - x1 ) * 0.5f ; x1 = data [ index + 1 ] ; imgX [ index ++ ] = ( x1 - x2 ) * 0.5f ; } for ( ; index < endX ; index ++ ) { imgX [ index ] = ( data [ index + 1 ] - data [ index - 1 ] ) * 0.5f ; } } }
Can only be used with images that are NOT sub - images .
280
13
26,729
private void pruneTracks ( SetTrackInfo < Desc > info , GrowQueue_I32 unassociated ) { if ( unassociated . size > maxInactiveTracks ) { // make the first N elements the ones which will be dropped int numDrop = unassociated . size - maxInactiveTracks ; for ( int i = 0 ; i < numDrop ; i ++ ) { int selected = rand . nextInt ( unassociated . size - i ) + i ; int a = unassociated . get ( i ) ; unassociated . data [ i ] = unassociated . data [ selected ] ; unassociated . data [ selected ] = a ; } List < PointTrack > dropList = new ArrayList <> ( ) ; for ( int i = 0 ; i < numDrop ; i ++ ) { dropList . add ( info . tracks . get ( unassociated . get ( i ) ) ) ; } for ( int i = 0 ; i < dropList . size ( ) ; i ++ ) { dropTrack ( dropList . get ( i ) ) ; } } }
If there are too many unassociated tracks randomly select some of those tracks and drop them
226
17
26,730
protected void putIntoSrcList ( SetTrackInfo < Desc > info ) { // make sure isAssociated is large enough if ( info . isAssociated . length < info . tracks . size ( ) ) { info . isAssociated = new boolean [ info . tracks . size ( ) ] ; } info . featSrc . reset ( ) ; info . locSrc . reset ( ) ; for ( int i = 0 ; i < info . tracks . size ( ) ; i ++ ) { PointTrack t = info . tracks . get ( i ) ; Desc desc = t . getDescription ( ) ; info . featSrc . add ( desc ) ; info . locSrc . add ( t ) ; info . isAssociated [ i ] = false ; } }
Put existing tracks into source list for association
159
8
26,731
@ Override public void spawnTracks ( ) { for ( int setIndex = 0 ; setIndex < sets . length ; setIndex ++ ) { SetTrackInfo < Desc > info = sets [ setIndex ] ; // setup data structures if ( info . isAssociated . length < info . featDst . size ) { info . isAssociated = new boolean [ info . featDst . size ] ; } // see which features are associated in the dst list for ( int i = 0 ; i < info . featDst . size ; i ++ ) { info . isAssociated [ i ] = false ; } for ( int i = 0 ; i < info . matches . size ; i ++ ) { info . isAssociated [ info . matches . data [ i ] . dst ] = true ; } // create new tracks from latest unassociated detected features for ( int i = 0 ; i < info . featDst . size ; i ++ ) { if ( info . isAssociated [ i ] ) continue ; Point2D_F64 loc = info . locDst . get ( i ) ; addNewTrack ( setIndex , loc . x , loc . y , info . featDst . get ( i ) ) ; } } }
Takes the current crop of detected features and makes them the keyframe
258
14
26,732
/**
 * Adds a new track at the given location with the given description.
 *
 * @return the newly added track, or null if the spawn location was rejected
 */
protected PointTrack addNewTrack( int setIndex , double x , double y , Desc desc ) {
    PointTrack p = getUnused();
    p.set(x, y);
    ((Desc)p.getDescription()).setTo(desc);

    if (!checkValidSpawn(setIndex, p)) {
        // rejected: recycle the track object
        unused.add(p);
        return null;
    }

    p.setId = setIndex;
    p.featureId = featureID++;

    sets[setIndex].tracks.add(p);
    tracksNew.add(p);
    tracksActive.add(p);
    tracksAll.add(p);
    return p;
}
Adds a new track given its location and description
143
9
26,733
/**
 * Returns a recycled track if one is available; otherwise creates a new one
 * with a freshly allocated description.
 */
protected PointTrack getUnused() {
    if (!unused.isEmpty()) {
        return unused.remove(unused.size() - 1);
    }
    PointTrack p = new PointTrack();
    p.setDescription(manager.createDescription());
    return p;
}
Returns an unused track . If there are no unused tracks then it creates a new one .
68
18
26,734
@ Override public boolean dropTrack ( PointTrack track ) { if ( ! tracksAll . remove ( track ) ) return false ; if ( ! sets [ track . setId ] . tracks . remove ( track ) ) { return false ; } // the track may or may not be in the active list tracksActive . remove ( track ) ; tracksInactive . remove ( track ) ; // it must be in the all list // recycle the data unused . add ( track ) ; return true ; }
Remove from active list and mark so that it is dropped in the next cycle
102
15
26,735
private void addRodriguesJacobian ( DMatrixRMaj Rj , Point3D_F64 worldPt , Point3D_F64 cameraPt ) { // (1/z)*dot(R)*X double Rx = ( Rj . data [ 0 ] * worldPt . x + Rj . data [ 1 ] * worldPt . y + Rj . data [ 2 ] * worldPt . z ) / cameraPt . z ; double Ry = ( Rj . data [ 3 ] * worldPt . x + Rj . data [ 4 ] * worldPt . y + Rj . data [ 5 ] * worldPt . z ) / cameraPt . z ; // dot(z)/(z^2) double zDot_div_z2 = ( Rj . data [ 6 ] * worldPt . x + Rj . data [ 7 ] * worldPt . y + Rj . data [ 8 ] * worldPt . z ) / ( cameraPt . z * cameraPt . z ) ; output [ indexX ++ ] = - zDot_div_z2 * cameraPt . x + Rx ; output [ indexY ++ ] = - zDot_div_z2 * cameraPt . y + Ry ; }
Adds to the Jacobian matrix using the derivative from a Rodrigues parameter .
284
15
26,736
private void addTranslationJacobian ( Point3D_F64 cameraPt ) { double divZ = 1.0 / cameraPt . z ; double divZ2 = 1.0 / ( cameraPt . z * cameraPt . z ) ; // partial T.x output [ indexX ++ ] = divZ ; output [ indexY ++ ] = 0 ; // partial T.y output [ indexX ++ ] = 0 ; output [ indexY ++ ] = divZ ; // partial T.z output [ indexX ++ ] = - cameraPt . x * divZ2 ; output [ indexY ++ ] = - cameraPt . y * divZ2 ; }
Derivative for translation element
145
6
26,737
private void addTranslationJacobian ( DMatrixRMaj R , Point3D_F64 cameraPt ) { double z = cameraPt . z ; double z2 = z * z ; // partial T.x output [ indexX ++ ] = R . get ( 0 , 0 ) / cameraPt . z - R . get ( 2 , 0 ) / z2 * cameraPt . x ; output [ indexY ++ ] = R . get ( 1 , 0 ) / cameraPt . z - R . get ( 2 , 0 ) / z2 * cameraPt . y ; // partial T.y output [ indexX ++ ] = R . get ( 0 , 1 ) / cameraPt . z - R . get ( 2 , 1 ) / z2 * cameraPt . x ; output [ indexY ++ ] = R . get ( 1 , 1 ) / cameraPt . z - R . get ( 2 , 1 ) / z2 * cameraPt . y ; // partial T.z output [ indexX ++ ] = R . get ( 0 , 2 ) / cameraPt . z - R . get ( 2 , 2 ) / z2 * cameraPt . x ; output [ indexY ++ ] = R . get ( 1 , 2 ) / cameraPt . z - R . get ( 2 , 2 ) / z2 * cameraPt . y ; }
The translation vector is now multiplied by 3x3 matrix R . The components of T are no longer decoupled .
298
24
26,738
/**
 * Copies the absolute value of every element in the subband image into
 * coef, allocating the output array when it is null.
 *
 * @return the filled coefficient array
 */
public static float[] subbandAbsVal( GrayF32 subband , float[] coef ) {
    if (coef == null) {
        coef = new float[subband.width*subband.height];
    }

    int i = 0;
    for (int y = 0; y < subband.height; y++) {
        int index = subband.startIndex + subband.stride*y;
        final int end = index + subband.width;
        while (index < end) {
            coef[i++] = Math.abs(subband.data[index++]);
        }
    }
    return coef;
}
Computes the absolute value of each element in the subband image and places it into coef .
136
19
26,739
/**
 * Creates a median blur filter for the specified image type.
 */
public static <T extends ImageBase<T>> BlurStorageFilter<T> median( ImageType<T> type , int radius ) {
    return new BlurStorageFilter<>("median", type, radius);
}
Creates a median filter for the specified image type .
51
11
26,740
/**
 * Creates a mean blur filter for the specified image type.
 */
public static <T extends ImageBase<T>> BlurStorageFilter<T> mean( ImageType<T> type , int radius ) {
    return new BlurStorageFilter<>("mean", type, radius);
}
Creates a mean filter for the specified image type .
50
11
26,741
/**
 * Creates a Gaussian blur filter for the specified image type.
 */
public static <T extends ImageBase<T>> BlurStorageFilter<T> gaussian( ImageType<T> type , double sigma , int radius ) {
    return new BlurStorageFilter<>("gaussian", type, sigma, radius);
}
Creates a Gaussian filter for the specified image type .
59
12
26,742
/**
 * Initializes tracking around the specified rectangular region and learns
 * the target's initial appearance.
 */
public void initialize( T image , int x0 , int y0 , int regionWidth , int regionHeight ) {
    this.imageWidth = image.width;
    this.imageHeight = image.height;

    setTrackLocation(x0, y0, regionWidth, regionHeight);
    initialLearning(image);
}
Initializes tracking around the specified rectangle region
66
8
26,743
public void setTrackLocation ( int x0 , int y0 , int regionWidth , int regionHeight ) { if ( imageWidth < regionWidth || imageHeight < regionHeight ) throw new IllegalArgumentException ( "Track region is larger than input image: " + regionWidth + " " + regionHeight ) ; regionOut . width = regionWidth ; regionOut . height = regionHeight ; // adjust for padding int w = ( int ) ( regionWidth * ( 1 + padding ) ) ; int h = ( int ) ( regionHeight * ( 1 + padding ) ) ; int cx = x0 + regionWidth / 2 ; int cy = y0 + regionHeight / 2 ; // save the track location this . regionTrack . width = w ; this . regionTrack . height = h ; this . regionTrack . x0 = cx - w / 2 ; this . regionTrack . y0 = cy - h / 2 ; stepX = ( w - 1 ) / ( float ) ( workRegionSize - 1 ) ; stepY = ( h - 1 ) / ( float ) ( workRegionSize - 1 ) ; updateRegionOut ( ) ; }
Used to change the track's location . If this method is used it is assumed that tracking is active and that the appearance of the target has not changed .
239
30
26,744
protected void initialLearning ( T image ) { // get subwindow at current estimated target position, to train classifier get_subwindow ( image , template ) ; // Kernel Regularized Least-Squares, calculate alphas (in Fourier domain) // k = dense_gauss_kernel(sigma, x); dense_gauss_kernel ( sigma , template , template , k ) ; fft . forward ( k , kf ) ; // new_alphaf = yf ./ (fft2(k) + lambda); %(Eq. 7) computeAlphas ( gaussianWeightDFT , kf , lambda , alphaf ) ; }
Learn the target's appearance .
142
6
26,745
/**
 * Fills the image with a 2-D raised-cosine window: the outer product of two
 * 1-D windows of the form 0.5*(1 - cos(2*pi*i/(N-1))).
 */
protected static void computeCosineWindow( GrayF64 cosine ) {
    // precompute the horizontal window
    double[] cosX = new double[cosine.width];
    for (int x = 0; x < cosine.width; x++) {
        cosX[x] = 0.5*(1 - Math.cos(2.0*Math.PI*x/(cosine.width - 1)));
    }

    for (int y = 0; y < cosine.height; y++) {
        int index = cosine.startIndex + y*cosine.stride;
        double cosY = 0.5*(1 - Math.cos(2.0*Math.PI*y/(cosine.height - 1)));
        for (int x = 0; x < cosine.width; x++) {
            cosine.data[index++] = cosX[x]*cosY;
        }
    }
}
Computes the cosine window
196
6
26,746
protected void computeGaussianWeights ( int width ) { // desired output (gaussian shaped), bandwidth proportional to target size double output_sigma = Math . sqrt ( width * width ) * output_sigma_factor ; double left = - 0.5 / ( output_sigma * output_sigma ) ; int radius = width / 2 ; for ( int y = 0 ; y < gaussianWeight . height ; y ++ ) { int index = gaussianWeight . startIndex + y * gaussianWeight . stride ; double ry = y - radius ; for ( int x = 0 ; x < width ; x ++ ) { double rx = x - radius ; gaussianWeight . data [ index ++ ] = Math . exp ( left * ( ry * ry + rx * rx ) ) ; } } fft . forward ( gaussianWeight , gaussianWeightDFT ) ; }
Computes the weights used in the gaussian kernel
193
10
26,747
/**
 * Searches for the target in the image and updates the track location.
 * When the interpolation factor is non-zero the model is also updated.
 *
 * @throws IllegalArgumentException if the image size changed
 */
public void performTracking( T image ) {
    if (image.width != imageWidth || image.height != imageHeight)
        throw new IllegalArgumentException("Tracking image size is not the same as " +
                "input image. Expected " + imageWidth + " x " + imageHeight);

    updateTrackLocation(image);

    if (interp_factor != 0)
        performLearning(image);
}
Search for the track in the image and update its location . If learning is enabled the target model is also updated .
87
8
26,748
/**
 * Finds the target inside the current image by searching around its last
 * known location: computes the classifier response in the Fourier domain
 * (response = real(ifft2(alphaf .* fft2(k))), Eq. 9), takes the pixel with
 * the largest response, refines it to sub-pixel accuracy, and converts the
 * peak offset from work-region coordinates back into image coordinates.
 */
protected void updateTrackLocation ( T image ) { get_subwindow ( image , templateNew ) ; // calculate response of the classifier at all locations // matlab: k = dense_gauss_kernel(sigma, x, z); dense_gauss_kernel ( sigma , templateNew , template , k ) ; fft . forward ( k , kf ) ; // response = real(ifft2(alphaf .* fft2(k))); %(Eq. 9) DiscreteFourierTransformOps . multiplyComplex ( alphaf , kf , tmpFourier0 ) ; fft . inverse ( tmpFourier0 , response ) ; // find the pixel with the largest response int N = response . width * response . height ; int indexBest = - 1 ; double valueBest = - 1 ; for ( int i = 0 ; i < N ; i ++ ) { double v = response . data [ i ] ; if ( v > valueBest ) { valueBest = v ; indexBest = i ; } } int peakX = indexBest % response . width ; int peakY = indexBest / response . width ; // sub-pixel peak estimation subpixelPeak ( peakX , peakY ) ; // peak in region's coordinate system float deltaX = ( peakX + offX ) - templateNew . width / 2 ; float deltaY = ( peakY + offY ) - templateNew . height / 2 ; // convert peak location into image coordinate system regionTrack . x0 = regionTrack . x0 + deltaX * stepX ; regionTrack . y0 = regionTrack . y0 + deltaY * stepY ; updateRegionOut ( ) ; }
Find the target inside the current image by searching around its last known location
361
14
26,749
protected void subpixelPeak ( int peakX , int peakY ) { // this function for r was determined empirically by using work regions of 32,64,128 int r = Math . min ( 2 , response . width / 25 ) ; if ( r < 0 ) return ; localPeak . setSearchRadius ( r ) ; localPeak . search ( peakX , peakY ) ; offX = localPeak . getPeakX ( ) - peakX ; offY = localPeak . getPeakY ( ) - peakY ; }
Refine the local - peak using a search algorithm for sub - pixel accuracy .
119
16
26,750
public void performLearning ( T image ) { // use the update track location get_subwindow ( image , templateNew ) ; // Kernel Regularized Least-Squares, calculate alphas (in Fourier domain) // k = dense_gauss_kernel(sigma, x); dense_gauss_kernel ( sigma , templateNew , templateNew , k ) ; fft . forward ( k , kf ) ; // new_alphaf = yf ./ (fft2(k) + lambda); %(Eq. 7) computeAlphas ( gaussianWeightDFT , kf , lambda , newAlphaf ) ; // subsequent frames, interpolate model // alphaf = (1 - interp_factor) * alphaf + interp_factor * new_alphaf; int N = alphaf . width * alphaf . height * 2 ; for ( int i = 0 ; i < N ; i ++ ) { alphaf . data [ i ] = ( 1 - interp_factor ) * alphaf . data [ i ] + interp_factor * newAlphaf . data [ i ] ; } // Set the previous image to be an interpolated version // z = (1 - interp_factor) * z + interp_factor * new_z; N = templateNew . width * templateNew . height ; for ( int i = 0 ; i < N ; i ++ ) { template . data [ i ] = ( 1 - interp_factor ) * template . data [ i ] + interp_factor * templateNew . data [ i ] ; } }
Update the alphas and the track's appearance
346
9
26,751
public static double imageDotProduct ( GrayF64 a ) { double total = 0 ; int N = a . width * a . height ; for ( int index = 0 ; index < N ; index ++ ) { double value = a . data [ index ] ; total += value * value ; } return total ; }
Computes the dot product of the image with itself
66
10
26,752
public static void elementMultConjB ( InterleavedF64 a , InterleavedF64 b , InterleavedF64 output ) { for ( int y = 0 ; y < a . height ; y ++ ) { int index = a . startIndex + y * a . stride ; for ( int x = 0 ; x < a . width ; x ++ , index += 2 ) { double realA = a . data [ index ] ; double imgA = a . data [ index + 1 ] ; double realB = b . data [ index ] ; double imgB = b . data [ index + 1 ] ; output . data [ index ] = realA * realB + imgA * imgB ; output . data [ index + 1 ] = - realA * imgB + imgA * realB ; } } }
Element - wise multiplication of a and the complex conjugate of b
175
14
26,753
protected static void gaussianKernel ( double xx , double yy , GrayF64 xy , double sigma , GrayF64 output ) { double sigma2 = sigma * sigma ; double N = xy . width * xy . height ; for ( int y = 0 ; y < xy . height ; y ++ ) { int index = xy . startIndex + y * xy . stride ; for ( int x = 0 ; x < xy . width ; x ++ , index ++ ) { // (xx + yy - 2 * xy) / numel(x) double value = ( xx + yy - 2 * xy . data [ index ] ) / N ; double v = Math . exp ( - Math . max ( 0 , value ) / sigma2 ) ; output . data [ index ] = v ; } } }
Computes the output of the Gaussian kernel for each element in the target region
184
16
26,754
protected void get_subwindow ( T image , GrayF64 output ) { // copy the target region interp . setImage ( image ) ; int index = 0 ; for ( int y = 0 ; y < workRegionSize ; y ++ ) { float yy = regionTrack . y0 + y * stepY ; for ( int x = 0 ; x < workRegionSize ; x ++ ) { float xx = regionTrack . x0 + x * stepX ; if ( interp . isInFastBounds ( xx , yy ) ) output . data [ index ++ ] = interp . get_fast ( xx , yy ) ; else if ( BoofMiscOps . checkInside ( image , xx , yy ) ) output . data [ index ++ ] = interp . get ( xx , yy ) ; else { // randomize to make pixels outside the image poorly correlate. It will then focus on matching // what's inside the image since it has structure output . data [ index ++ ] = rand . nextFloat ( ) * maxPixelValue ; } } } // normalize values to be from -0.5 to 0.5 PixelMath . divide ( output , maxPixelValue , output ) ; PixelMath . plus ( output , - 0.5f , output ) ; // apply the cosine window to it PixelMath . multiply ( output , cosine , output ) ; }
Copies the target into the output image and applies the cosine window to it .
294
17
26,755
void selectBlockSize ( int width , int height , int requestedBlockWidth ) { if ( height < requestedBlockWidth ) { blockHeight = height ; } else { int rows = height / requestedBlockWidth ; blockHeight = height / rows ; } if ( width < requestedBlockWidth ) { blockWidth = width ; } else { int cols = width / requestedBlockWidth ; blockWidth = width / cols ; } }
Selects a block size which is close to the requested block size by the user
88
16
26,756
protected void applyThreshold ( T input , GrayU8 output ) { for ( int blockY = 0 ; blockY < stats . height ; blockY ++ ) { for ( int blockX = 0 ; blockX < stats . width ; blockX ++ ) { original . thresholdBlock ( blockX , blockY , input , stats , output ) ; } } }
Applies the dynamically computed threshold to each pixel in the image one block at a time
76
17
26,757
public void addImage ( T image , String cameraName ) { PairwiseImageGraph . View view = new PairwiseImageGraph . View ( graph . nodes . size ( ) , new FastQueue < TupleDesc > ( TupleDesc . class , true ) { @ Override protected TupleDesc createInstance ( ) { return detDesc . createDescription ( ) ; } } ) ; view . camera = graph . cameras . get ( cameraName ) ; if ( view . camera == null ) throw new IllegalArgumentException ( "Must have added the camera first" ) ; view . index = graph . nodes . size ( ) ; graph . nodes . add ( view ) ; detDesc . detect ( image ) ; // Pre-declare memory view . descriptions . growArray ( detDesc . getNumberOfFeatures ( ) ) ; view . observationPixels . growArray ( detDesc . getNumberOfFeatures ( ) ) ; for ( int i = 0 ; i < detDesc . getNumberOfFeatures ( ) ; i ++ ) { Point2D_F64 p = detDesc . getLocation ( i ) ; // save copies since detDesc recycles memory view . descriptions . grow ( ) . setTo ( detDesc . getDescription ( i ) ) ; view . observationPixels . grow ( ) . set ( p ) ; } if ( view . camera . pixelToNorm == null ) { return ; } view . observationNorm . growArray ( detDesc . getNumberOfFeatures ( ) ) ; for ( int i = 0 ; i < view . observationPixels . size ; i ++ ) { Point2D_F64 p = view . observationPixels . get ( i ) ; view . camera . pixelToNorm . compute ( p . x , p . y , view . observationNorm . grow ( ) ) ; } if ( verbose != null ) { verbose . println ( "Detected Features: " + detDesc . getNumberOfFeatures ( ) ) ; } }
Adds a new observation from a camera . Detects features inside the image and saves those .
415
17
26,758
protected boolean connectViews ( PairwiseImageGraph . View viewA , PairwiseImageGraph . View viewB , FastQueue < AssociatedIndex > matches ) { // Estimate fundamental/essential with RANSAC PairwiseImageGraph . Motion edge = new PairwiseImageGraph . Motion ( ) ; int inliersEpipolar ; CameraPinhole pinhole0 = viewA . camera . pinhole ; CameraPinhole pinhole1 = viewB . camera . pinhole ; if ( pinhole0 != null && pinhole1 != null ) { // Fully calibrated camera pair ransacEssential . setIntrinsic ( 0 , pinhole0 ) ; ransacEssential . setIntrinsic ( 1 , pinhole1 ) ; if ( ! fitEpipolar ( matches , viewA . observationNorm . toList ( ) , viewB . observationNorm . toList ( ) , ransacEssential , edge ) ) { if ( verbose != null && verboseLevel >= 1 ) { verbose . println ( " fit essential failed" ) ; } return false ; } edge . metric = true ; inliersEpipolar = ransacEssential . getMatchSet ( ) . size ( ) ; edge . F . set ( ransacEssential . getModelParameters ( ) ) ; } else if ( fitEpipolar ( matches , viewA . observationPixels . toList ( ) , viewB . observationPixels . toList ( ) , ransacFundamental , edge ) ) { // transform is only known up to a projective transform edge . metric = false ; inliersEpipolar = ransacFundamental . getMatchSet ( ) . size ( ) ; edge . F . set ( ransacFundamental . getModelParameters ( ) ) ; } else { if ( verbose != null && verboseLevel >= 1 ) { verbose . println ( " fit fundamental failed" ) ; } return false ; } if ( inliersEpipolar < MIN_FEATURE_ASSOCIATED ) { if ( verbose != null && verboseLevel >= 1 ) { verbose . println ( " too too few inliers. " + inliersEpipolar + " min=" + MIN_FEATURE_ASSOCIATED + " obsA=" + viewA . observationNorm . size + " obsB=" + viewB . observationNorm . size ) ; } return false ; } // If only a very small number of features are associated do not consider the view double fractionA = inliersEpipolar / ( double ) viewA . descriptions . size ; double fractionB = inliersEpipolar / ( double ) viewB . descriptions . 
size ; if ( fractionA < MIN_ASSOCIATE_FRACTION | fractionB < MIN_ASSOCIATE_FRACTION ) return false ; // If the geometry is good for triangulation this number will be lower edge . viewSrc = viewA ; edge . viewDst = viewB ; edge . index = graph . edges . size ( ) ; viewA . connections . add ( edge ) ; viewB . connections . add ( edge ) ; graph . edges . add ( edge ) ; return true ; }
Associate features between the two views . Then compute a homography and essential matrix using LMedS . Add features to the edge if they are an inlier in essential . Save fit score of homography vs essential .
676
42
26,759
boolean fitEpipolar ( FastQueue < AssociatedIndex > matches , List < Point2D_F64 > pointsA , List < Point2D_F64 > pointsB , ModelMatcher < ? , AssociatedPair > ransac , PairwiseImageGraph . Motion edge ) { pairs . resize ( matches . size ) ; for ( int i = 0 ; i < matches . size ; i ++ ) { AssociatedIndex a = matches . get ( i ) ; pairs . get ( i ) . p1 . set ( pointsA . get ( a . src ) ) ; pairs . get ( i ) . p2 . set ( pointsB . get ( a . dst ) ) ; } if ( ! ransac . process ( pairs . toList ( ) ) ) return false ; int N = ransac . getMatchSet ( ) . size ( ) ; for ( int i = 0 ; i < N ; i ++ ) { AssociatedIndex a = matches . get ( ransac . getInputIndex ( i ) ) ; edge . associated . add ( a . copy ( ) ) ; } return true ; }
Uses ransac to fit an epipolar model to the associated features . Adds list of matched features to the edge .
235
25
26,760
public void process ( T gray , GrayU8 binary ) { if ( verbose ) System . out . println ( "ENTER DetectPolygonFromContour.process()" ) ; if ( contourPadded != null && ! contourPadded . isCreatePaddedCopy ( ) ) { int padding = 2 ; if ( gray . width + padding != binary . width || gray . height + padding != binary . height ) { throw new IllegalArgumentException ( "Including padding, expected a binary image with shape " + ( gray . width + padding ) + "x" + ( gray . height + padding ) ) ; } } else { InputSanityCheck . checkSameShape ( binary , gray ) ; } if ( imageWidth != gray . width || imageHeight != gray . height ) configure ( gray . width , gray . height ) ; // reset storage for output. Call reset individually here to ensure that all references // are nulled from last time for ( int i = 0 ; i < foundInfo . size ; i ++ ) { foundInfo . get ( i ) . reset ( ) ; } foundInfo . reset ( ) ; if ( contourEdgeIntensity != null ) contourEdgeIntensity . setImage ( gray ) ; long time0 = System . nanoTime ( ) ; // find all the contours contourFinder . process ( binary ) ; long time1 = System . nanoTime ( ) ; // Using the contours find the polygons findCandidateShapes ( ) ; long time2 = System . nanoTime ( ) ; double a = ( time1 - time0 ) * 1e-6 ; double b = ( time2 - time1 ) * 1e-6 ; milliContour . update ( a ) ; milliShapes . update ( b ) ; if ( verbose ) System . out . println ( "EXIT DetectPolygonFromContour.process()" ) ; }
Examines the undistorted gray scale input image for squares .
405
15
26,761
void determineCornersOnBorder ( Polygon2D_F64 polygon , GrowQueue_B onImageBorder ) { onImageBorder . reset ( ) ; for ( int i = 0 ; i < polygon . size ( ) ; i ++ ) { Point2D_F64 p = polygon . get ( i ) ; onImageBorder . add ( p . x <= 1 || p . y <= 1 || p . x >= imageWidth - 2 || p . y >= imageHeight - 2 ) ; } }
Check to see if corners are touching the image border
109
10
26,762
public List < Point2D_I32 > getContour ( Info info ) { contourTmp . reset ( ) ; contourFinder . loadContour ( info . contour . externalIndex , contourTmp ) ; return contourTmp . toList ( ) ; }
Returns the undistorted contour for a shape . Data is potentially recycled the next time any function in this class is invoked .
62
26
26,763
private void removeDistortionFromContour ( List < Point2D_I32 > distorted , FastQueue < Point2D_I32 > undistorted ) { undistorted . reset ( ) ; for ( int j = 0 ; j < distorted . size ( ) ; j ++ ) { // remove distortion Point2D_I32 p = distorted . get ( j ) ; Point2D_I32 q = undistorted . grow ( ) ; distToUndist . compute ( p . x , p . y , distortedPoint ) ; // round to minimize error q . x = Math . round ( distortedPoint . x ) ; q . y = Math . round ( distortedPoint . y ) ; } }
Removes lens distortion from the found contour
150
9
26,764
protected final boolean touchesBorder ( List < Point2D_I32 > contour ) { int endX = imageWidth - 1 ; int endY = imageHeight - 1 ; for ( int j = 0 ; j < contour . size ( ) ; j ++ ) { Point2D_I32 p = contour . get ( j ) ; if ( p . x == 0 || p . y == 0 || p . x == endX || p . y == endY ) { return true ; } } return false ; }
Checks to see if some part of the contour touches the image border . Most likely cropped
111
19
26,765
public void set ( double fx , double fy , double skew ) { this . fx = fx ; this . fy = fy ; this . skew = skew ; }
Specify camera intrinsic parameters
39
5
26,766
public static < T extends ImageGray < T > , FD extends TupleDesc > Homography2D_F64 computeTransform ( T imageA , T imageB , DetectDescribePoint < T , FD > detDesc , AssociateDescription < FD > associate , ModelMatcher < Homography2D_F64 , AssociatedPair > modelMatcher ) { // get the length of the description List < Point2D_F64 > pointsA = new ArrayList <> ( ) ; FastQueue < FD > descA = UtilFeature . createQueue ( detDesc , 100 ) ; List < Point2D_F64 > pointsB = new ArrayList <> ( ) ; FastQueue < FD > descB = UtilFeature . createQueue ( detDesc , 100 ) ; // extract feature locations and descriptions from each image describeImage ( imageA , detDesc , pointsA , descA ) ; describeImage ( imageB , detDesc , pointsB , descB ) ; // Associate features between the two images associate . setSource ( descA ) ; associate . setDestination ( descB ) ; associate . associate ( ) ; // create a list of AssociatedPairs that tell the model matcher how a feature moved FastQueue < AssociatedIndex > matches = associate . getMatches ( ) ; List < AssociatedPair > pairs = new ArrayList <> ( ) ; for ( int i = 0 ; i < matches . size ( ) ; i ++ ) { AssociatedIndex match = matches . get ( i ) ; Point2D_F64 a = pointsA . get ( match . src ) ; Point2D_F64 b = pointsB . get ( match . dst ) ; pairs . add ( new AssociatedPair ( a , b , false ) ) ; } // find the best fit model to describe the change between these images if ( ! modelMatcher . process ( pairs ) ) throw new RuntimeException ( "Model Matcher failed!" ) ; // return the found image transform return modelMatcher . getModelParameters ( ) . copy ( ) ; }
Using abstracted code find a transform which minimizes the difference between corresponding features in both images . This code is completely model independent and is the core algorithms .
431
31
26,767
public static < T extends ImageGray < T > > void stitch ( BufferedImage imageA , BufferedImage imageB , Class < T > imageType ) { T inputA = ConvertBufferedImage . convertFromSingle ( imageA , null , imageType ) ; T inputB = ConvertBufferedImage . convertFromSingle ( imageB , null , imageType ) ; // Detect using the standard SURF feature descriptor and describer DetectDescribePoint detDesc = FactoryDetectDescribe . surfStable ( new ConfigFastHessian ( 1 , 2 , 200 , 1 , 9 , 4 , 4 ) , null , null , imageType ) ; ScoreAssociation < BrightFeature > scorer = FactoryAssociation . scoreEuclidean ( BrightFeature . class , true ) ; AssociateDescription < BrightFeature > associate = FactoryAssociation . greedy ( scorer , 2 , true ) ; // fit the images using a homography. This works well for rotations and distant objects. ModelMatcher < Homography2D_F64 , AssociatedPair > modelMatcher = FactoryMultiViewRobust . homographyRansac ( null , new ConfigRansac ( 60 , 3 ) ) ; Homography2D_F64 H = computeTransform ( inputA , inputB , detDesc , associate , modelMatcher ) ; renderStitching ( imageA , imageB , H ) ; }
Given two input images create and display an image where the two have been overlaid on top of each other .
291
22
26,768
public void configure ( double baseline , DMatrixRMaj K , DMatrixRMaj rectifiedR , Point2Transform2_F64 rectifiedToColor , int minDisparity , int maxDisparity ) { this . K = K ; ConvertMatrixData . convert ( rectifiedR , this . rectifiedR ) ; this . rectifiedToColor = rectifiedToColor ; this . baseline = ( float ) baseline ; this . focalLengthX = ( float ) K . get ( 0 , 0 ) ; this . focalLengthY = ( float ) K . get ( 1 , 1 ) ; this . centerX = ( float ) K . get ( 0 , 2 ) ; this . centerY = ( float ) K . get ( 1 , 2 ) ; this . minDisparity = minDisparity ; this . rangeDisparity = maxDisparity - minDisparity ; }
Stereo and intrinsic camera parameters
192
6
26,769
public void process ( ImageGray disparity , BufferedImage color ) { cloudRgb . setMaxSize ( disparity . width * disparity . height ) ; cloudXyz . setMaxSize ( disparity . width * disparity . height * 3 ) ; cloudRgb . reset ( ) ; cloudXyz . reset ( ) ; if ( disparity instanceof GrayU8 ) process ( ( GrayU8 ) disparity , color ) ; else process ( ( GrayF32 ) disparity , color ) ; }
Given the disparity image compute the 3D location of valid points and save pixel colors at that point
101
19
26,770
public < T extends ImageGray < T > > void corruptImage ( T original , T corrupted ) { GGrayImageOps . stretch ( original , valueScale , valueOffset , 255.0 , corrupted ) ; GImageMiscOps . addGaussian ( corrupted , rand , valueNoise , 0 , 255 ) ; GPixelMath . boundImage ( corrupted , 0 , 255 ) ; }
Applies the specified corruption to the image .
81
9
26,771
protected boolean findScaleH ( DMatrixRMaj H ) { if ( ! svd . decompose ( H ) ) return false ; Arrays . sort ( svd . getSingularValues ( ) , 0 , 3 ) ; double scale = svd . getSingularValues ( ) [ 1 ] ; CommonOps_DDRM . divide ( H , scale ) ; return true ; }
The scale of H is found by computing the second smallest singular value .
83
14
26,772
public static void vertical ( BorderIndex1D border , WlCoef_I32 coefficients , GrayI input , GrayI output ) { UtilWavelet . checkShape ( input , output ) ; final int offsetA = coefficients . offsetScaling ; final int offsetB = coefficients . offsetWavelet ; final int [ ] alpha = coefficients . scaling ; final int [ ] beta = coefficients . wavelet ; border . setLength ( input . height + input . height % 2 ) ; boolean isLarger = output . height > input . height ; for ( int x = 0 ; x < input . width ; x ++ ) { for ( int y = 0 ; y < input . height ; y += 2 ) { int scale = 0 ; int wavelet = 0 ; for ( int i = 0 ; i < alpha . length ; i ++ ) { int yy = border . getIndex ( y + i + offsetA ) ; if ( isLarger && yy >= input . height ) continue ; scale += input . get ( x , yy ) * alpha [ i ] ; } for ( int i = 0 ; i < beta . length ; i ++ ) { int yy = border . getIndex ( y + i + offsetB ) ; if ( isLarger && yy >= input . height ) continue ; wavelet += input . get ( x , yy ) * beta [ i ] ; } int outY = y / 2 ; scale = 2 * scale / coefficients . denominatorScaling ; wavelet = 2 * wavelet / coefficients . denominatorWavelet ; output . set ( x , outY , scale ) ; output . set ( x , output . height / 2 + outY , wavelet ) ; } } }
Performs a single level wavelet transform along the vertical axis .
365
13
26,773
public static < Input extends ImageBase < Input > , Output extends ImageBase < Output > > ConvolveInterface < Input , Output > convolve ( Kernel1D kernel , ImageType < Input > inputType , ImageType < Output > outputType , BorderType border , boolean isHorizontal ) { if ( inputType . getFamily ( ) != ImageType . Family . GRAY ) throw new IllegalArgumentException ( "Currently only gray scale image supported" ) ; Class _inputType = inputType . getImageClass ( ) ; Class _outputType = outputType == null ? null : outputType . getImageClass ( ) ; _outputType = BoofTesting . convertToGenericType ( _outputType ) ; Class < ? > borderClassType = FactoryImageBorder . lookupBorderClassType ( _inputType ) ; String direction = isHorizontal ? "horizontal" : "vertical" ; Method m ; try { switch ( border ) { case SKIP : m = ConvolveImageNoBorder . class . getMethod ( direction , kernel . getClass ( ) , _inputType , _outputType ) ; break ; case EXTENDED : m = BoofTesting . findMethod ( ConvolveImage . class , direction , kernel . getClass ( ) , _inputType , _outputType , borderClassType ) ; break ; case REFLECT : m = BoofTesting . findMethod ( ConvolveImage . class , direction , kernel . getClass ( ) , _inputType , _outputType , borderClassType ) ; break ; case WRAP : m = BoofTesting . findMethod ( ConvolveImage . class , direction , kernel . getClass ( ) , _inputType , _outputType , borderClassType ) ; break ; case NORMALIZED : m = ConvolveImageNormalized . class . getMethod ( direction , kernel . getClass ( ) , _inputType , _outputType ) ; break ; default : throw new IllegalArgumentException ( "Unknown border type " + border ) ; } } catch ( NoSuchMethodException e ) { throw new IllegalArgumentException ( "The specified convolution cannot be found" ) ; } return new GenericConvolve <> ( m , kernel , border , inputType , outputType ) ; }
Creates a filter for convolving 1D kernels along the image .
475
14
26,774
public static < T extends ImageBase < T > , O extends CameraPinhole , D extends CameraPinhole > ImageDistort < T , T > changeCameraModel ( AdjustmentType type , BorderType borderType , O original , D desired , D modified , ImageType < T > imageType ) { Class bandType = imageType . getImageClass ( ) ; boolean skip = borderType == BorderType . SKIP ; // it has to process the border at some point, so if skip is requested just skip stuff truly outside the image if ( skip ) borderType = BorderType . EXTENDED ; InterpolatePixelS interp = FactoryInterpolation . createPixelS ( 0 , 255 , InterpolationType . BILINEAR , borderType , bandType ) ; Point2Transform2_F32 undistToDist = LensDistortionOps_F32 . transformChangeModel ( type , original , desired , true , modified ) ; ImageDistort < T , T > distort = FactoryDistort . distort ( true , interp , imageType ) ; distort . setModel ( new PointToPixelTransform_F32 ( undistToDist ) ) ; distort . setRenderAll ( ! skip ) ; return distort ; }
Creates a distortion for modifying the input image from one camera model into another camera model . If requested the camera model can be further modified to ensure certain visibility requirements are meet and the adjusted camera model will be returned .
257
43
26,775
public static boolean getT ( GrayU8 image , int x , int y ) { if ( image . isInBounds ( x , y ) ) { return image . get ( x , y ) != 0 ; } else { return true ; } }
If a point is inside the image true is returned when its value is not zero ; points outside the image are treated as true .
53
21
26,776
public static boolean getF ( GrayU8 image , int x , int y ) { if ( image . isInBounds ( x , y ) ) { return image . get ( x , y ) != 0 ; } else { return false ; } }
If a point is inside the image true is returned when its value is not zero ; points outside the image are treated as false .
53
21
26,777
public boolean convert ( ChessboardCornerGraph cluster , GridInfo info ) { // default to an invalid value to ensure a failure doesn't go unnoticed. info . reset ( ) ; // Get the edges in a consistent order if ( ! orderEdges ( cluster ) ) return false ; // Now we need to order the nodes into a proper grid which follows right hand rule if ( ! orderNodes ( cluster . corners , info ) ) return false ; // select a valid corner to be (0,0). If there are multiple options select the one which is int corner = selectCorner ( info ) ; if ( corner == - 1 ) { if ( verbose != null ) verbose . println ( "Failed to find valid corner." ) ; return false ; } // rotate the grid until the select corner is at (0,0) for ( int i = 0 ; i < corner ; i ++ ) { rotateCCW ( info ) ; } return true ; }
Puts cluster nodes into grid order and computes the number of rows and columns . If the cluster is not a complete grid this function will fail and return false
200
32
26,778
int selectCorner ( GridInfo info ) { info . lookupGridCorners ( cornerList ) ; int bestCorner = - 1 ; double bestScore = Double . MAX_VALUE ; boolean bestIsCornerSquare = false ; for ( int i = 0 ; i < cornerList . size ( ) ; i ++ ) { Node n = cornerList . get ( i ) ; boolean corner = isCornerValidOrigin ( n ) ; // If there are no corner points which are valid corners, then any corner can be the origin if // allowNoCorner is true if ( corner || ( allowNoCorner && ! bestIsCornerSquare ) ) { // sanity check the shape if ( checkShape != null ) { if ( i % 2 == 0 ) { if ( ! checkShape . isValidShape ( info . rows , info . cols ) ) { continue ; } } else { if ( ! checkShape . isValidShape ( info . cols , info . rows ) ) { continue ; } } } // If the distance is to (0,0) pixel is smaller or this is a corner square and the other best // is not a corner square double distance = n . normSq ( ) ; if ( distance < bestScore || ( ! bestIsCornerSquare && corner ) ) { bestIsCornerSquare |= corner ; bestScore = distance ; bestCorner = i ; } } } info . hasCornerSquare = bestIsCornerSquare ; return bestCorner ; }
Selects a corner to be the grid's origin . 0 = top - left 1 = top - right 2 = bottom - right 3 = bottom - left .
314
32
26,779
boolean orderNodes ( FastQueue < Node > corners , GridInfo info ) { // Find a node with just two edges. This is a corner and will be the arbitrary origin in our graph Node seed = null ; for ( int i = 0 ; i < corners . size ; i ++ ) { Node n = corners . get ( i ) ; if ( n . countEdges ( ) == 2 ) { seed = n ; break ; } } if ( seed == null ) { if ( verbose != null ) verbose . println ( "Can't find a corner with just two edges. Aborting" ) ; return false ; } // find one edge and mark that as the row direction int rowEdge = 0 ; while ( seed . edges [ rowEdge ] == null ) rowEdge = ( rowEdge + 1 ) % 4 ; int colEdge = ( rowEdge + 1 ) % 4 ; while ( seed . edges [ colEdge ] == null ) colEdge = ( colEdge + 2 ) % 4 ; // if it's left handed swap the row and column direction if ( ! isRightHanded ( seed , rowEdge , colEdge ) ) { int tmp = rowEdge ; rowEdge = colEdge ; colEdge = tmp ; } // add the corns to list in a row major order while ( seed != null ) { int before = info . nodes . size ( ) ; Node n = seed ; do { info . nodes . add ( n ) ; n = n . edges [ colEdge ] ; } while ( n != null ) ; seed = seed . edges [ rowEdge ] ; if ( info . cols == - 1 ) { info . cols = info . nodes . size ( ) ; } else { int columnsInRow = info . nodes . size ( ) - before ; if ( columnsInRow != info . cols ) { if ( verbose != null ) verbose . println ( "Number of columns in each row is variable" ) ; return false ; } } } info . rows = info . nodes . size ( ) / info . cols ; return true ; }
Put corners into a proper grid . Make sure its a rectangular grid or else return false . Rows and columns are selected to ensure right hand rule .
437
30
26,780
static boolean isRightHanded ( Node seed , int idxRow , int idxCol ) { Node r = seed . edges [ idxRow ] ; Node c = seed . edges [ idxCol ] ; double dirRow = Math . atan2 ( r . y - seed . y , r . x - seed . x ) ; double dirCol = Math . atan2 ( c . y - seed . y , c . x - seed . x ) ; return UtilAngle . distanceCW ( dirRow , dirCol ) < Math . PI ; }
Checks to see if the rows and columns for a coordinate system which is right handed
120
17
26,781
void sortEdgesCCW ( FastQueue < Node > corners ) { for ( int nodeIdx = 0 ; nodeIdx < corners . size ; nodeIdx ++ ) { Node na = corners . get ( nodeIdx ) ; // reference node to do angles relative to. double ref = Double . NaN ; int count = 0 ; for ( int i = 0 ; i < 4 ; i ++ ) { order [ i ] = i ; tmpEdges [ i ] = na . edges [ i ] ; if ( na . edges [ i ] == null ) { directions [ i ] = Double . MAX_VALUE ; } else { Node nb = na . edges [ i ] ; double angleB = Math . atan2 ( nb . y - na . y , nb . x - na . x ) ; if ( Double . isNaN ( ref ) ) { ref = angleB ; directions [ i ] = 0 ; } else { directions [ i ] = UtilAngle . distanceCCW ( ref , angleB ) ; } count ++ ; } } sorter . sort ( directions , 0 , 4 , order ) ; for ( int i = 0 ; i < 4 ; i ++ ) { na . edges [ i ] = tmpEdges [ order [ i ] ] ; } if ( count == 2 ) { // If there are only two then we define the order to be defined by the one which minimizes // CCW direction if ( directions [ order [ 1 ] ] > Math . PI ) { na . edges [ 0 ] = tmpEdges [ order [ 1 ] ] ; na . edges [ 1 ] = tmpEdges [ order [ 0 ] ] ; } else { na . edges [ 0 ] = tmpEdges [ order [ 0 ] ] ; na . edges [ 1 ] = tmpEdges [ order [ 1 ] ] ; } } else if ( count == 3 ) { // Edges need to point along the 4 possible directions, in the case of 3 edges, there might // need to be a gap at a different location than at the end int selected = - 1 ; double largestAngle = 0 ; for ( int i = 0 , j = 2 ; i < 3 ; j = i , i ++ ) { double ccw = UtilAngle . distanceCCW ( directions [ order [ j ] ] , directions [ order [ i ] ] ) ; if ( ccw > largestAngle ) { largestAngle = ccw ; selected = j ; } } for ( int i = 2 ; i > selected ; i -- ) { na . edges [ i + 1 ] = na . edges [ i ] ; } na . edges [ selected + 1 ] = null ; } } }
Sorts edges so that they point towards nodes in an increasing counter clockwise direction
571
16
26,782
public void rotateCCW ( GridInfo grid ) { cornerList . clear ( ) ; for ( int col = 0 ; col < grid . cols ; col ++ ) { for ( int row = 0 ; row < grid . rows ; row ++ ) { cornerList . add ( grid . get ( row , grid . cols - col - 1 ) ) ; } } int tmp = grid . rows ; grid . rows = grid . cols ; grid . cols = tmp ; grid . nodes . clear ( ) ; grid . nodes . addAll ( cornerList ) ; }
Rotates the grid in the CCW direction
120
9
26,783
public void setCalibration ( StereoParameters stereoParam ) { CameraPinholeBrown left = stereoParam . getLeft ( ) ; CameraPinholeBrown right = stereoParam . getRight ( ) ; // adjust image size imageLeftRect . reshape ( left . getWidth ( ) , left . getHeight ( ) ) ; imageRightRect . reshape ( right . getWidth ( ) , right . getHeight ( ) ) ; // compute rectification RectifyCalibrated rectifyAlg = RectifyImageOps . createCalibrated ( ) ; Se3_F64 leftToRight = stereoParam . getRightToLeft ( ) . invert ( null ) ; // original camera calibration matrices DMatrixRMaj K1 = PerspectiveOps . pinholeToMatrix ( left , ( DMatrixRMaj ) null ) ; DMatrixRMaj K2 = PerspectiveOps . pinholeToMatrix ( right , ( DMatrixRMaj ) null ) ; rectifyAlg . process ( K1 , new Se3_F64 ( ) , K2 , leftToRight ) ; // rectification matrix for each image rect1 = rectifyAlg . getRect1 ( ) ; rect2 = rectifyAlg . getRect2 ( ) ; // New calibration and rotation matrix, Both cameras are the same after rectification. rectK = rectifyAlg . getCalibrationMatrix ( ) ; rectR = rectifyAlg . getRectifiedRotation ( ) ; FMatrixRMaj rect1_F32 = new FMatrixRMaj ( 3 , 3 ) ; FMatrixRMaj rect2_F32 = new FMatrixRMaj ( 3 , 3 ) ; ConvertMatrixData . convert ( rect1 , rect1_F32 ) ; ConvertMatrixData . convert ( rect2 , rect2_F32 ) ; ImageType < T > imageType = imageLeftRect . getImageType ( ) ; distortLeftRect = RectifyImageOps . rectifyImage ( stereoParam . left , rect1_F32 , BorderType . SKIP , imageType ) ; distortRightRect = RectifyImageOps . rectifyImage ( stereoParam . right , rect2_F32 , BorderType . SKIP , imageType ) ; // Compute parameters that are needed when converting to 3D baseline = stereoParam . getBaseline ( ) ; fx = rectK . get ( 0 , 0 ) ; fy = rectK . get ( 1 , 1 ) ; cx = rectK . get ( 0 , 2 ) ; cy = rectK . get ( 1 , 2 ) ; }
Specifies stereo parameters
551
4
26,784
public void computeHomo3D ( double x , double y , Point3D_F64 pointLeft ) { // Coordinate in rectified camera frame pointRect . z = baseline * fx ; pointRect . x = pointRect . z * ( x - cx ) / fx ; pointRect . y = pointRect . z * ( y - cy ) / fy ; // rotate into the original left camera frame GeometryMath_F64 . multTran ( rectR , pointRect , pointLeft ) ; }
Given a coordinate of a point in the left rectified frame compute the point's 3D coordinate in the camera's reference frame in homogeneous coordinates . To convert the coordinate into normal 3D divide each element by the disparity .
110
45
26,785
/**
 * Given the input image type, returns the type of image its integral image should be.
 * Floating-point input integrates into {@link GrayF32}; integer input into {@link GrayS32}.
 *
 * @throws IllegalArgumentException if the input image type is not supported
 */
public static <I extends ImageGray<I>, II extends ImageGray<II>> Class<II> getIntegralType( Class<I> inputType ) {
	Class<?> integralType;
	if (inputType == GrayF32.class) {
		integralType = GrayF32.class;
	} else if (inputType == GrayU8.class || inputType == GrayS32.class) {
		// both integer inputs accumulate into a signed 32-bit integral image
		integralType = GrayS32.class;
	} else {
		throw new IllegalArgumentException("Unknown input image type: " + inputType.getSimpleName());
	}
	return (Class<II>)integralType;
}
Given the input image return the type of image the integral image should be .
145
15
26,786
/**
 * Estimates pose stability by perturbing each landmark by the specified number of
 * pixels in the distorted image.
 *
 * @return true if the fiducial's pose could be estimated, false otherwise
 */
@Override
public boolean computeStability( int which, double disturbance, FiducialStability results ) {
	// a pose estimate is a prerequisite for the stability computation
	if (getFiducialToCamera(which, targetToCamera)) {
		stability.setShape(getSideWidth(which), getSideHeight(which));
		stability.computeStability(targetToCamera, disturbance, results);
		return true;
	}
	return false;
}
Estimates the stability by perturbing each land mark by the specified number of pixels in the distorted image .
84
22
26,787
/**
 * Builds the list pairing each observed 2D pixel with its known 3D control point.
 * Pixels are converted into normalized image coordinates before being stored.
 */
private void createDetectedList( int which, List<PointIndex2D_F64> pixels ) {
	detected2D3D.clear();
	List<Point2D3D> controls = getControl3D(which);
	for (int idx = 0; idx < pixels.size(); idx++) {
		PointIndex2D_F64 pixel = pixels.get(idx);
		Point2D3D pair = controls.get(idx);
		// convert from pixel to normalized image coordinates
		pixelToNorm.compute(pixel.x, pixel.y, pair.observation);
		detected2D3D.add(pair);
	}
}
Create the list of observed points in 2D3D
132
11
26,788
/**
 * Given the mapping of 2D observations to known 3D points, estimates the pose of the
 * fiducial by solving the P-n-P problem. When enough points are available, an initial
 * solution is used to prune apparent outliers before re-estimating, and the result is
 * polished with non-linear refinement.
 *
 * @param which fiducial index the points belong to
 * @param points 2D-3D correspondences in normalized image coordinates
 * @param fiducialToCamera (output) estimated fiducial-to-camera transform
 * @return true if an estimate was produced, false if PnP failed
 */
protected boolean estimatePose( int which, List<Point2D3D> points, Se3_F64 fiducialToCamera ) {
	// initial estimate from all points
	if (!estimatePnP.process(points, initialEstimate)) {
		return false;
	}

	filtered.clear();

	// Don't bother pruning if there are hardly any points to work with
	if (points.size() > 6) {
		w2p.configure(lensDistortion, initialEstimate);

		// compute the reprojection error for each point, in image pixels (squared distance)
		errors.reset();
		for (int idx = 0; idx < detectedPixels.size(); idx++) {
			PointIndex2D_F64 foo = detectedPixels.get(idx);
			w2p.transform(points.get(idx).location, predicted);
			errors.add(predicted.distance2(foo));
		}

		// NOTE(review): despite the name, 'stdev' is the raw SUM of squared errors —
		// it is never divided by N or square-rooted. Confirm whether a true standard
		// deviation was intended before relying on the comment below.
		double stdev = 0;
		for (int i = 0; i < errors.size; i++) {
			stdev += errors.get(i);
		}

		// prune points "3 standard deviations" away (the multiplier used is 4)
		// Don't prune if the threshold is less than 1.5 pixels since that's about what
		// you would expect and pruning could make the solution worse
		double sigma3 = Math.max(1.5, 4*stdev);
		for (int i = 0; i < points.size(); i++) {
			if (errors.get(i) < sigma3) {
				filtered.add(points.get(i));
			}
		}

		// recompute the pose estimate without the outliers
		if (filtered.size() != points.size()) {
			if (!estimatePnP.process(filtered, initialEstimate)) {
				return false;
			}
		}
	} else {
		filtered.addAll(points);
	}

	// NOTE(review): refinement runs on 'points', not 'filtered' — outliers pruned
	// above are re-included here. Confirm whether 'filtered' was intended.
	return refinePnP.fitModel(points, initialEstimate, fiducialToCamera);
}
Given the mapping of 2D observations to known 3D points estimate the pose of the fiducial . This solves the P - n - P problem .
429
31
26,789
/**
 * Computes the Hough transform using the image gradient and a binary image which flags
 * pixels as being edges or not.
 *
 * @param derivX image derivative along the x-axis
 * @param derivY image derivative along the y-axis
 * @param binary binary image marking edge pixels
 * @throws IllegalArgumentException if the derivative image type is unsupported
 */
public <D extends ImageGray<D>> void transform( D derivX, D derivY, GrayU8 binary ) {
	// all three images must have identical shapes
	InputSanityCheck.checkSameShape(derivX, derivY, binary);

	// Consistency fix: take both dimensions from the same image. The original mixed
	// derivX.width with derivY.height; shapes are verified equal above, so behavior
	// is unchanged.
	transform.reshape(derivX.width, derivX.height);
	ImageMiscOps.fill(transform, 0);

	// center the parameter space on the image center to minimize numerical error
	originX = derivX.width/2;
	originY = derivX.height/2;
	candidates.reset();

	// dispatch to the implementation matching the derivative image type
	if (derivX instanceof GrayF32)
		_transform((GrayF32)derivX, (GrayF32)derivY, binary);
	else if (derivX instanceof GrayS16)
		_transform((GrayS16)derivX, (GrayS16)derivY, binary);
	else if (derivX instanceof GrayS32)
		_transform((GrayS32)derivX, (GrayS32)derivY, binary);
	else
		throw new IllegalArgumentException("Unsupported derivative image type: " + derivX.getClass().getSimpleName());
}
Computes the Hough transform using the image gradient and a binary image which flags pixels as being edges or not .
230
23
26,790
public void parameterize ( int x , int y , float derivX , float derivY ) { // put the point in a new coordinate system centered at the image's origin // this minimizes error, which is a function of distance from origin x -= originX ; y -= originY ; float v = ( x * derivX + y * derivY ) / ( derivX * derivX + derivY * derivY ) ; // finds the foot a line normal equation and put the point into image coordinates int x0 = ( int ) ( v * derivX ) + originX ; int y0 = ( int ) ( v * derivY ) + originY ; if ( transform . isInBounds ( x0 , y0 ) ) { int index = transform . startIndex + y0 * transform . stride + x0 ; // keep track of candidate pixels so that a sparse search can be done // to detect lines if ( transform . data [ index ] ++ == 1 ) candidates . add ( x0 , y0 ) ; } }
Takes the detected point along the line and its gradient and converts it into transform space .
215
18
26,791
public static void HighLevel ( GrayF32 input ) { System . out . println ( "\n------------------- Dense High Level" ) ; DescribeImageDense < GrayF32 , TupleDesc_F64 > describer = FactoryDescribeImageDense . hog ( new ConfigDenseHoG ( ) , input . getImageType ( ) ) ; // sift(new ConfigDenseSift(),GrayF32.class); // surfFast(new ConfigDenseSurfFast(),GrayF32.class); // process the image and compute the dense image features describer . process ( input ) ; // print out part of the first few features System . out . println ( "Total Features = " + describer . getLocations ( ) . size ( ) ) ; for ( int i = 0 ; i < 5 ; i ++ ) { Point2D_I32 p = describer . getLocations ( ) . get ( i ) ; TupleDesc_F64 d = describer . getDescriptions ( ) . get ( i ) ; System . out . printf ( "%3d %3d = [ %f %f %f %f\n" , p . x , p . y , d . value [ 0 ] , d . value [ 1 ] , d . value [ 2 ] , d . value [ 3 ] ) ; // You would process the feature descriptor here } }
For much larger images you might need to shrink the image down or change the cell size to get good results .
297
22
26,792
/**
 * Computes the disparity for two stereo images along the image's right axis.
 * Both images must be rectified.
 *
 * @param left rectified left image
 * @param right rectified right image
 * @param imageDisparity (output) computed disparity, same shape as the inputs
 */
public void process( I left, I right, GrayF32 imageDisparity ) {
	// check inputs and initialize data structures
	InputSanityCheck.checkSameShape(left, right, imageDisparity);
	this.imageLeft = left;
	this.imageRight = right;

	w = left.width;
	h = left.height;

	// Compute disparity for each pixel. A border of two region radii is skipped
	// because the score samples regions offset by one radius from the center.
	for (int y = radiusY*2; y < h - radiusY*2; y++) {
		for (int x = radiusX*2 + minDisparity; x < w - radiusX*2; x++) {
			// take into account the image border and score-array capacity when
			// computing the maximum disparity that can be evaluated at this column
			int max = x - Math.max(radiusX*2 - 1, x - score.length);

			// compute the match score across all candidate disparities
			processPixel(x, y, max);

			// select the best disparity (winner takes all)
			imageDisparity.set(x, y, (float)selectBest(max));
		}
	}
}
Computes the disparity for two stereo images along the image's right axis . Both images must be rectified .
208
22
26,793
/**
 * Computes the fit score for every candidate disparity at the specified pixel and
 * stores it in the {@code score} array.
 */
private void processPixel( int c_x, int c_y, int maxDisparity ) {
	for (int d = minDisparity; d < maxDisparity; d++)
		score[d] = computeScore(c_x, c_x - d, c_y);
}
Computes fit score for each possible disparity
68
8
26,794
/**
 * Selects the best disparity using the winner-takes-all approach: the candidate
 * with the lowest score wins.
 *
 * @param length upper bound (exclusive) on the disparities to consider
 * @return winning disparity relative to the minimum disparity
 */
protected double selectBest( int length ) {
	int winner = -1;
	double lowest = Double.MAX_VALUE;
	for (int d = minDisparity; d < length; d++) {
		if (score[d] < lowest) {
			lowest = score[d];
			winner = d;
		}
	}
	return winner - minDisparity;
}
Select best disparity using the winner takes all approach
74
9
26,795
/**
 * Five-region matching score: the center region plus the two best (lowest) scoring
 * of the four surrounding corner regions.
 */
protected double computeScore( int leftX, int rightX, int centerY ) {
	// score the four corner regions offset by one radius from the center
	four[0] = computeScoreRect(leftX - radiusX, rightX - radiusX, centerY - radiusY);
	four[1] = computeScoreRect(leftX + radiusX, rightX + radiusX, centerY - radiusY);
	four[2] = computeScoreRect(leftX - radiusX, rightX - radiusX, centerY + radiusY);
	four[3] = computeScoreRect(leftX + radiusX, rightX + radiusX, centerY + radiusY);

	double center = computeScoreRect(leftX, rightX, centerY);

	// sorting puts the two best corner scores first
	Arrays.sort(four);

	return four[0] + four[1] + center;
}
Compute the score for five local regions and just use the center + the two best
169
17
26,796
/**
 * Generates a QR Code with the specified message. Renders the position, timing,
 * alignment, format, and (optionally) version patterns, then the data modules.
 * An exception is thrown if the raw data does not match the expected length for
 * the QR code's version.
 *
 * @param qr QR code description to render
 * @throws RuntimeException if the raw data length is inconsistent with the version
 */
public void render( QrCode qr ) {
	initialize(qr);
	render.init();

	// three position (finder) patterns: top-left, top-right, bottom-left
	positionPattern(0, 0, qr.ppCorner);
	positionPattern((numModules - 7)*moduleWidth, 0, qr.ppRight);
	positionPattern(0, (numModules - 7)*moduleWidth, qr.ppDown);

	// horizontal and vertical timing patterns between the position patterns
	timingPattern(7*moduleWidth, 6*moduleWidth, moduleWidth, 0);
	timingPattern(6*moduleWidth, 7*moduleWidth, 0, moduleWidth);

	formatInformation();

	// version information is only encoded for sufficiently large versions
	if (qr.version >= QrCode.VERSION_ENCODED_AT)
		versionInformation();

	// render alignment patterns, skipping the three corners occupied by
	// position patterns. NOTE(review): '&' is used instead of '&&'; the result is
	// identical for booleans (just not short-circuit) — confirm this is deliberate.
	int alignment[] = QrCode.VERSION_INFO[qr.version].alignment;
	for (int i = 0; i < alignment.length; i++) {
		int row = alignment[i];
		for (int j = 0; j < alignment.length; j++) {
			if (i == 0 & j == 0)
				continue;
			if (i == alignment.length - 1 & j == 0)
				continue;
			if (i == 0 & j == alignment.length - 1)
				continue;
			int col = alignment[j];
			alignmentPattern(col, row);
		}
	}

	if (renderData) {
		if (qr.rawbits.length != QrCode.VERSION_INFO[qr.version].codewords)
			throw new RuntimeException("Unexpected length of raw data.");

		// mark which modules can store data
		bitLocations = new QrCodeCodeWordLocations(qr.version).bits;

		// sanity check: the number of storable bits must match the raw data length
		int numBytes = bitLocations.size()/8;
		if (numBytes != qr.rawbits.length)
			throw new RuntimeException("Egads. unexpected length of qrcode raw data");

		// Render the output data
		renderData();
	}

	// bounding polygon of the rendered marker, clockwise from the top-left corner
	qr.bounds.set(0, 0, 0);
	qr.bounds.set(1, markerWidth, 0);
	qr.bounds.set(2, markerWidth, markerWidth);
	qr.bounds.set(3, 0, markerWidth);
}
Generates a QR Code with the specified message . An exception is thrown if the message is too long to be encoded .
480
24
26,797
/**
 * Renders the raw data bits into the QR code's modules, applying the selected mask
 * pattern to each bit. Only whole bytes are rendered; any trailing partial byte of
 * module locations is skipped.
 */
private void renderData() {
	QrCodeMaskPattern mask = qr.mask;

	int count = 0;

	// round the number of renderable bits down to a whole number of bytes
	int length = bitLocations.size() - bitLocations.size()%8;
	while (count < length) {
		int bits = qr.rawbits[count/8] & 0xFF;

		// number of bits from this byte that have module locations available
		int N = Math.min(8, bitLocations.size() - count);

		for (int i = 0; i < N; i++) {
			Point2D_I32 coor = bitLocations.get(count + i);
			// mask flips the bit value depending on the module's (row, col) position
			int value = mask.apply(coor.y, coor.x, ((bits >> i) & 0x01));
//			int value = ((bits >> i ) & 0x01);
			if (value > 0) {
				square(coor.y, coor.x);
			}
		}
		count += 8;
	}
}
Renders the raw data bit output while applying the selected mask
197
12
26,798
/**
 * Loads a pinhole camera model with radial and tangential distortion stored in
 * OpenCV's YAML/XML calibration format and converts it into BoofCV's
 * {@link CameraPinholeBrown} model.
 *
 * NOTE(review): the FileStorage is never released/closed here — presumably a native
 * resource leak; confirm against the JavaCV API and add a release() if so.
 *
 * @param fileName path to the OpenCV calibration file
 * @return equivalent BoofCV camera model
 */
public static CameraPinholeBrown loadPinholeRadial( String fileName ) {
	FileStorage fs = new FileStorage(new File(fileName).getAbsolutePath(), FileStorage.READ);

	// image resolution
	IntPointer width = new IntPointer(1);
	IntPointer height = new IntPointer(1);
	read(fs.get("image_width"), width, -1);
	read(fs.get("image_height"), height, -1);

	// 3x3 intrinsic calibration matrix
	Mat K = new Mat();
	read(fs.get("camera_matrix"), K);

	// distortion coefficients in OpenCV order: k1, k2, p1, p2[, k3]
	Mat distortion = new Mat();
	read(fs.get("distortion_coefficients"), distortion);

	CameraPinholeBrown boof = new CameraPinholeBrown();
	boof.width = width.get();
	boof.height = height.get();

	// copy intrinsics out of the calibration matrix
	DoubleRawIndexer indexerK = K.createIndexer();
	boof.fx = indexerK.get(0, 0);
	boof.skew = indexerK.get(0, 1);
	boof.fy = indexerK.get(1, 1);
	boof.cx = indexerK.get(0, 2);
	boof.cy = indexerK.get(1, 2);

	// radial terms: k1, k2 and (if present) k3 at row 4
	DoubleRawIndexer indexerD = distortion.createIndexer();
	if (distortion.rows() >= 5)
		boof.setRadial(indexerD.get(0, 0), indexerD.get(1, 0), indexerD.get(4, 0));
	else if (distortion.rows() >= 2)
		boof.setRadial(indexerD.get(0, 0), indexerD.get(1, 0));

	// tangential terms p1, p2 — NOTE(review): 'fsetTangental' looks like the float
	// variant being fed double values; confirm this is the intended setter
	if (distortion.rows() >= 5)
		boof.fsetTangental(indexerD.get(2, 0), indexerD.get(3, 0));

	return boof;
}
Loads a pinhole camera model with radial and tangential distortion in OpenCV format
449
18
26,799
/**
 * Finds the squared Euclidean distance from point p to the closest of the
 * quadrilateral's four corners.
 *
 * @param p point to measure against
 * @return squared distance to the nearest corner
 */
public double distanceSqCorner( Point2D_F64 p ) {
	double closest = Double.MAX_VALUE;
	for (int corner = 0; corner < 4; corner++) {
		closest = Math.min(closest, square.get(corner).distance2(p));
	}
	return closest;
}
Finds the Euclidean distance squared of the closest corner to point p
74
15