idx
int64
0
165k
question
stringlengths
73
4.15k
target
stringlengths
5
918
len_question
int64
21
890
len_target
int64
3
255
27,100
/**
 * Shows a set of images in a grid pattern inside a new visible window.
 *
 * @param numColumns Number of columns in the grid. Assumed to be >= 1.
 * @param title Title of the window.
 * @param images Images to be displayed.
 * @return The panel the images are drawn inside of.
 */
public static ImageGridPanel showGrid( int numColumns, String title, BufferedImage... images ) {
	JFrame frame = new JFrame(title);

	// BUG FIX: ceiling division. The previous 'length/cols + length%cols' overestimates
	// the row count whenever the remainder is > 1 (e.g. 5 images in 3 columns gave 3 rows
	// instead of the 2 actually needed)
	int numRows = (images.length + numColumns - 1)/numColumns;

	ImageGridPanel panel = new ImageGridPanel(numRows, numColumns, images);
	frame.add(panel, BorderLayout.CENTER);
	frame.pack();
	frame.setVisible(true);
	return panel;
}
Shows a set of images in a grid pattern .
108
11
27,101
public static JFrame setupWindow ( final JComponent component , String title , final boolean closeOnExit ) { BoofSwingUtil . checkGuiThread ( ) ; final JFrame frame = new JFrame ( title ) ; frame . add ( component , BorderLayout . CENTER ) ; frame . pack ( ) ; frame . setLocationRelativeTo ( null ) ; // centers window in the monitor if ( closeOnExit ) frame . setDefaultCloseOperation ( JFrame . EXIT_ON_CLOSE ) ; return frame ; }
Sets up the window but doesn't show it. Must be called in a GUI thread.
112
18
27,102
public static void applyBoxFilter ( GrayF32 input ) { // declare storage GrayF32 boxImage = new GrayF32 ( input . width , input . height ) ; InterleavedF32 boxTransform = new InterleavedF32 ( input . width , input . height , 2 ) ; InterleavedF32 transform = new InterleavedF32 ( input . width , input . height , 2 ) ; GrayF32 blurredImage = new GrayF32 ( input . width , input . height ) ; GrayF32 spatialBlur = new GrayF32 ( input . width , input . height ) ; DiscreteFourierTransform < GrayF32 , InterleavedF32 > dft = DiscreteFourierTransformOps . createTransformF32 ( ) ; // Make the image scaled from 0 to 1 to reduce overflow issues PixelMath . divide ( input , 255.0f , input ) ; // compute the Fourier Transform dft . forward ( input , transform ) ; // create the box filter which is centered around the pixel. Note that the filter gets wrapped around // the image edges for ( int y = 0 ; y < 15 ; y ++ ) { int yy = y - 7 < 0 ? boxImage . height + ( y - 7 ) : y - 7 ; for ( int x = 0 ; x < 15 ; x ++ ) { int xx = x - 7 < 0 ? boxImage . width + ( x - 7 ) : x - 7 ; // Set the value such that it doesn't change the image intensity boxImage . set ( xx , yy , 1.0f / ( 15 * 15 ) ) ; } } // compute the DFT for the box filter dft . forward ( boxImage , boxTransform ) ; // Visualize the Fourier Transform for the input image and the box filter displayTransform ( transform , "Input Image" ) ; displayTransform ( boxTransform , "Box Filter" ) ; // apply the filter. convolution in spacial domain is the same as multiplication in the frequency domain DiscreteFourierTransformOps . multiplyComplex ( transform , boxTransform , transform ) ; // convert the image back and display the results dft . inverse ( transform , blurredImage ) ; // undo change of scale PixelMath . multiply ( blurredImage , 255.0f , blurredImage ) ; PixelMath . 
multiply ( input , 255.0f , input ) ; // For sake of comparison, let's compute the box blur filter in the spatial domain // NOTE: The image border will be different since the frequency domain wraps around and this implementation // of the spacial domain adapts the kernel size BlurImageOps . mean ( input , spatialBlur , 7 , null , null ) ; // Convert to BufferedImage for output BufferedImage originOut = ConvertBufferedImage . convertTo ( input , null ) ; BufferedImage spacialOut = ConvertBufferedImage . convertTo ( spatialBlur , null ) ; BufferedImage blurredOut = ConvertBufferedImage . convertTo ( blurredImage , null ) ; ListDisplayPanel listPanel = new ListDisplayPanel ( ) ; listPanel . addImage ( originOut , "Original Image" ) ; listPanel . addImage ( spacialOut , "Spacial Domain Box" ) ; listPanel . addImage ( blurredOut , "Frequency Domain Box" ) ; ShowImages . showWindow ( listPanel , "Box Blur in Spacial and Frequency Domain of Input Image" ) ; }
Demonstration of how to apply a box filter in the frequency domain and compares the results to a box filter which has been applied in the spatial domain
726
29
27,103
/**
 * Displays the Fourier transform's magnitude and phase in a window.
 *
 * @param transform Complex transform to visualize. Not modified (a clone is used internally).
 * @param name Name appended to the window title.
 */
public static void displayTransform( InterleavedF32 transform, String name ) {
	// declare storage
	GrayF32 magnitude = new GrayF32(transform.width, transform.height);
	GrayF32 phase = new GrayF32(transform.width, transform.height);

	// Make a copy so that you don't modify the input
	transform = transform.clone();

	// shift the zero-frequency into the image center, as is standard in image processing
	DiscreteFourierTransformOps.shiftZeroFrequency(transform, true);

	// Compute the transform's magnitude and phase
	DiscreteFourierTransformOps.magnitude(transform, magnitude);
	DiscreteFourierTransformOps.phase(transform, phase);

	// Convert it to a log scale for visibility
	PixelMath.log(magnitude, magnitude);

	// Display the results
	BufferedImage visualMag = VisualizeImageData.grayMagnitude(magnitude, null, -1);
	BufferedImage visualPhase = VisualizeImageData.colorizeSign(phase, null, Math.PI);

	ImageGridPanel dual = new ImageGridPanel(1, 2, visualMag, visualPhase);
	ShowImages.showWindow(dual, "Magnitude and Phase of " + name);
}
Display the Fourier transform's magnitude and phase.
263
10
27,104
/**
 * Given a set of noisy observations, computes the fundamental matrix while removing outliers.
 *
 * @param matches Set of associated point pairs between two views.
 * @param inliers (Output) Pairs consistent with the estimated model are added to this list.
 * @param inlierThreshold RANSAC inlier threshold, in the same units as the configured error model.
 * @return The refined fundamental matrix.
 * @throws IllegalArgumentException if RANSAC or the non-linear refinement fails.
 */
public static DMatrixRMaj robustFundamental( List<AssociatedPair> matches,
		List<AssociatedPair> inliers, double inlierThreshold ) {
	ConfigRansac configRansac = new ConfigRansac();
	configRansac.inlierThreshold = inlierThreshold;
	configRansac.maxIterations = 1000;

	ConfigFundamental configFundamental = new ConfigFundamental();
	configFundamental.which = EnumFundamental.LINEAR_7;
	configFundamental.numResolve = 2;
	// geometric error is the most accurate error metric, but also the slowest to compute. See how the
	// results change if you switch to sampson and how much faster it is. You also should adjust
	// the inlier threshold.
	configFundamental.errorModel = ConfigFundamental.ErrorModel.GEOMETRIC;

	ModelMatcher<DMatrixRMaj, AssociatedPair> ransac =
			FactoryMultiViewRobust.fundamentalRansac(configFundamental, configRansac);

	// Estimate the fundamental matrix while removing outliers
	if (!ransac.process(matches))
		throw new IllegalArgumentException("Failed");

	// save the set of features that were used to compute the fundamental matrix
	inliers.addAll(ransac.getMatchSet());

	// Improve the estimate of the fundamental matrix using non-linear optimization
	DMatrixRMaj F = new DMatrixRMaj(3, 3);
	ModelFitter<DMatrixRMaj, AssociatedPair> refine =
			FactoryMultiView.fundamentalRefine(1e-8, 400, EpipolarError.SAMPSON);
	if (!refine.fitModel(inliers, ransac.getModelParameters(), F))
		throw new IllegalArgumentException("Failed");

	// Return the solution
	return F;
}
Given a set of noisy observations compute the Fundamental matrix while removing the noise .
410
15
27,105
/**
 * If the set of associated features is known to be correct, the fundamental matrix can be
 * computed directly with much less code. The downside is that this technique is very
 * sensitive to noise.
 *
 * @param matches Associated point pairs assumed to be outlier free.
 * @return The estimated fundamental matrix.
 * @throws IllegalArgumentException if the estimator fails.
 */
public static DMatrixRMaj simpleFundamental( List<AssociatedPair> matches ) {
	// Use the 8-point algorithm since it will work with an arbitrary number of points
	Estimate1ofEpipolar estimateF = FactoryMultiView.fundamental_1(EnumFundamental.LINEAR_8, 0);

	DMatrixRMaj F = new DMatrixRMaj(3, 3);
	if (!estimateF.process(matches, F))
		throw new IllegalArgumentException("Failed");

	// while not done here, this initial linear estimate can be refined using non-linear optimization
	// as was done above.
	return F;
}
If the set of associated features are known to be correct then the fundamental matrix can be computed directly with a lot less code . The down side is that this technique is very sensitive to noise .
140
38
27,106
/**
 * Reconstructs the QR code's data while applying Reed-Solomon error correction.
 *
 * QR data is split into two block types: type "A" blocks whose sizes come straight from the
 * version table, and type "B" blocks which hold exactly one more codeword each.
 *
 * @param qr QR code being decoded. qr.corrected is allocated and filled on success.
 * @return true if all blocks decoded successfully
 */
public boolean applyErrorCorrection( QrCode qr ) {
	// System.out.println("decoder ver "+qr.version);
	// System.out.println("decoder mask "+qr.mask);
	// System.out.println("decoder error "+qr.error);

	QrCode.VersionInfo info = QrCode.VERSION_INFO[qr.version];
	QrCode.BlockInfo block = info.levels.get(qr.error);

	// type "A" block sizes from the version/level table
	int wordsBlockAllA = block.codewords;
	int wordsBlockDataA = block.dataCodewords;
	int wordsEcc = wordsBlockAllA - wordsBlockDataA;
	int numBlocksA = block.blocks;

	// type "B" blocks carry one extra codeword; their count is whatever remains
	int wordsBlockAllB = wordsBlockAllA + 1;
	int wordsBlockDataB = wordsBlockDataA + 1;
	int numBlocksB = (info.codewords - wordsBlockAllA*numBlocksA)/wordsBlockAllB;

	int totalBlocks = numBlocksA + numBlocksB;
	int totalDataBytes = wordsBlockDataA*numBlocksA + wordsBlockDataB*numBlocksB;
	qr.corrected = new byte[totalDataBytes];

	// configure the Reed-Solomon decoder for this ECC word count
	ecc.resize(wordsEcc);
	rscodes.generator(wordsEcc);

	// decode type "A" blocks first, then the "B" blocks which start after them
	if (!decodeBlocks(qr, wordsBlockDataA, numBlocksA, 0, 0, totalDataBytes, totalBlocks))
		return false;

	return decodeBlocks(qr, wordsBlockDataB, numBlocksB,
			numBlocksA*wordsBlockDataA, numBlocksA, totalDataBytes, totalBlocks);
}
Reconstruct the data while applying error correction .
345
10
27,107
/**
 * If only one mode is seen then that mode is used. Once a second, different mode
 * appears the result becomes MIXED.
 *
 * @param current Mode seen so far (UNKNOWN if none yet).
 * @param candidate Mode of the segment just decoded.
 * @return The combined mode.
 */
private QrCode.Mode updateModeLogic( QrCode.Mode current, QrCode.Mode candidate ) {
	if (current == candidate)
		return current;
	// first real mode wins; any disagreement afterwards collapses to MIXED
	return current == QrCode.Mode.UNKNOWN ? candidate : QrCode.Mode.MIXED;
}
If only one mode then that mode is used . If more than one mode is used then set to multiple
69
21
27,108
boolean checkPaddingBytes ( QrCode qr , int lengthBytes ) { boolean a = true ; for ( int i = lengthBytes ; i < qr . corrected . length ; i ++ ) { if ( a ) { if ( 0b00110111 != ( qr . corrected [ i ] & 0xFF ) ) return false ; } else { if ( 0b10001000 != ( qr . corrected [ i ] & 0xFF ) ) { // the pattern starts over at the beginning of a block. Strictly enforcing the standard // requires knowing size of a data chunk and where it starts. Possible but // probably not worth the effort the implement as a strict requirement. if ( 0b00110111 == ( qr . corrected [ i ] & 0xFF ) ) { a = true ; } else { return false ; } } } a = ! a ; } return true ; }
Makes sure the unused padding bytes have the expected values
194
10
27,109
/**
 * Decodes a numeric-mode segment and appends the digits to the work string.
 *
 * @param qr QR code being decoded; failureCause is set on error.
 * @param data Encoded payload bits.
 * @param bitLocation Index of the first bit in this segment.
 * @return Bit location after this segment, or -1 on failure.
 */
private int decodeNumeric( QrCode qr, PackedBits8 data, int bitLocation ) {
	int lengthBits = QrCodeEncoder.getLengthBitsNumeric(qr.version);

	int length = data.read(bitLocation, lengthBits, true);
	bitLocation += lengthBits;

	// digits are packed three at a time into 10 bits
	while (length >= 3) {
		if (data.size < bitLocation + 10) {
			qr.failureCause = QrCode.Failure.MESSAGE_OVERFLOW;
			return -1;
		}
		int chunk = data.read(bitLocation, 10, true);
		bitLocation += 10;

		int valA = chunk/100;
		int valB = (chunk - valA*100)/10;
		int valC = chunk - valA*100 - valB*10;

		// BUG FIX: digits were previously offset by ' ' (0x20) instead of '0' (0x30),
		// which produced punctuation characters rather than the decoded digits
		workString.append((char)(valA + '0'));
		workString.append((char)(valB + '0'));
		workString.append((char)(valC + '0'));

		length -= 3;
	}

	if (length == 2) {
		// two remaining digits are packed into 7 bits
		if (data.size < bitLocation + 7) {
			qr.failureCause = QrCode.Failure.MESSAGE_OVERFLOW;
			return -1;
		}
		int chunk = data.read(bitLocation, 7, true);
		bitLocation += 7;

		int valA = chunk/10;
		int valB = chunk - valA*10;

		workString.append((char)(valA + '0'));
		workString.append((char)(valB + '0'));
	} else if (length == 1) {
		// a single remaining digit uses 4 bits
		if (data.size < bitLocation + 4) {
			qr.failureCause = QrCode.Failure.MESSAGE_OVERFLOW;
			return -1;
		}
		int valA = data.read(bitLocation, 4, true);
		bitLocation += 4;

		workString.append((char)(valA + '0'));
	}
	return bitLocation;
}
Decodes a numeric message
441
5
27,110
/**
 * Decodes an alphanumeric-mode segment and appends the characters to the work string.
 *
 * @param qr QR code being decoded; failureCause is set on error.
 * @param data Encoded payload bits.
 * @param bitLocation Index of the first bit in this segment.
 * @return Bit location after this segment, or -1 on failure.
 */
private int decodeAlphanumeric( QrCode qr, PackedBits8 data, int bitLocation ) {
	int lengthBits = QrCodeEncoder.getLengthBitsAlphanumeric(qr.version);

	int length = data.read(bitLocation, lengthBits, true);
	bitLocation += lengthBits;

	// characters are packed two at a time into 11 bits
	while (length >= 2) {
		if (data.size < bitLocation + 11) {
			qr.failureCause = QrCode.Failure.MESSAGE_OVERFLOW;
			return -1;
		}
		int chunk = data.read(bitLocation, 11, true);
		bitLocation += 11;

		int valA = chunk/45;
		int valB = chunk - valA*45;

		workString.append(valueToAlphanumeric(valA));
		workString.append(valueToAlphanumeric(valB));

		length -= 2;
	}

	// a single remaining character uses 6 bits
	if (length == 1) {
		if (data.size < bitLocation + 6) {
			qr.failureCause = QrCode.Failure.MESSAGE_OVERFLOW;
			return -1;
		}
		int valA = data.read(bitLocation, 6, true);
		bitLocation += 6;

		workString.append(valueToAlphanumeric(valA));
	}
	return bitLocation;
}
Decodes alphanumeric messages
283
6
27,111
private int decodeByte ( QrCode qr , PackedBits8 data , int bitLocation ) { int lengthBits = QrCodeEncoder . getLengthBitsBytes ( qr . version ) ; int length = data . read ( bitLocation , lengthBits , true ) ; bitLocation += lengthBits ; if ( length * 8 > data . size - bitLocation ) { qr . failureCause = QrCode . Failure . MESSAGE_OVERFLOW ; return - 1 ; } byte rawdata [ ] = new byte [ length ] ; for ( int i = 0 ; i < length ; i ++ ) { rawdata [ i ] = ( byte ) data . read ( bitLocation , 8 , true ) ; bitLocation += 8 ; } // If ECI encoding is not specified use the default encoding. Unfortunately the specification is ignored // by most people here and UTF-8 is used. If an encoding is specified then that is used. String encoding = encodingEci == null ? ( forceEncoding != null ? forceEncoding : guessEncoding ( rawdata ) ) : encodingEci ; try { workString . append ( new String ( rawdata , encoding ) ) ; } catch ( UnsupportedEncodingException ignored ) { qr . failureCause = JIS_UNAVAILABLE ; return - 1 ; } return bitLocation ; }
Decodes byte messages
290
4
27,112
private int decodeKanji ( QrCode qr , PackedBits8 data , int bitLocation ) { int lengthBits = QrCodeEncoder . getLengthBitsKanji ( qr . version ) ; int length = data . read ( bitLocation , lengthBits , true ) ; bitLocation += lengthBits ; byte rawdata [ ] = new byte [ length * 2 ] ; for ( int i = 0 ; i < length ; i ++ ) { if ( data . size < bitLocation + 13 ) { qr . failureCause = QrCode . Failure . MESSAGE_OVERFLOW ; return - 1 ; } int letter = data . read ( bitLocation , 13 , true ) ; bitLocation += 13 ; letter = ( ( letter / 0x0C0 ) << 8 ) | ( letter % 0x0C0 ) ; if ( letter < 0x01F00 ) { // In the 0x8140 to 0x9FFC range letter += 0x08140 ; } else { // In the 0xE040 to 0xEBBF range letter += 0x0C140 ; } rawdata [ i * 2 ] = ( byte ) ( letter >> 8 ) ; rawdata [ i * 2 + 1 ] = ( byte ) letter ; } // Shift_JIS may not be supported in some environments: try { workString . append ( new String ( rawdata , "Shift_JIS" ) ) ; } catch ( UnsupportedEncodingException ignored ) { qr . failureCause = KANJI_UNAVAILABLE ; return - 1 ; } return bitLocation ; }
Decodes Kanji messages
351
5
27,113
/**
 * Picks a seed corner from the contour while avoiding the pointy edges at the other end.
 * The selected node is marked before being returned.
 *
 * @return the best scoring corner node, or null if none qualified
 */
NodeInfo selectSeedCorner() {
	NodeInfo best = null;
	double bestScore = 0;
	// only consider nodes whose interior angle is at least ~180 degrees
	double minAngle = Math.PI + 0.1;

	for (int i = 0; i < contour.size; i++) {
		NodeInfo info = contour.get(i);

		if (info.angleBetween < minAngle)
			continue;

		// both neighbors must agree on the same middle node
		Edge middleR = selectClosest(info.right, info, true);
		if (middleR == null)
			continue;
		Edge middleL = selectClosest(info, info.left, true);
		if (middleL == null)
			continue;
		if (middleL.target != middleR.target)
			continue;

		// With no perspective distortion, at the correct corners difference should be zero
		// while the bad ones will be around 60 degrees
		double r = UtilAngle.bound(middleR.angle + Math.PI);
		double difference = UtilAngle.dist(r, middleL.angle);

		double score = info.angleBetween - difference;

		if (score > bestScore) {
			best = info;
			bestScore = score;
		}
	}

	if (best != null) {
		best.marked = true;
	}
	return best;
}
Pick a corner but avoid the pointy edges at the other end
255
13
27,114
/**
 * Traverses along the first two columns of the grid and fills in their node lists,
 * marking each node as it is claimed.
 *
 * @param first First node of column 0.
 * @param second Second node of column 0.
 * @param column0 (Output) Nodes assigned to the first column.
 * @param column1 (Output) Nodes assigned to the second column.
 */
static void bottomTwoColumns( NodeInfo first, NodeInfo second,
		List<NodeInfo> column0, List<NodeInfo> column1 ) {
	column0.add(first);
	column0.add(second);

	// seed the second column from the first two nodes
	NodeInfo left = selectClosestN(first, second);
	if (left == null) {
		return;
	}
	left.marked = true;
	column1.add(left);

	// zig-zag between the two columns until no more nodes are found
	NodeInfo right = second;
	while (true) {
		NodeInfo next = selectClosestN(left, right);
		if (next == null)
			break;
		next.marked = true;
		column1.add(next);
		left = next;

		next = selectClosestN(left, right);
		if (next == null)
			break;
		next.marked = true;
		column0.add(next);
		right = next;
	}
}
Traverses along the first two columns and sets them up
180
12
27,115
static Edge selectClosest ( NodeInfo a , NodeInfo b , boolean checkSide ) { double bestScore = Double . MAX_VALUE ; Edge bestEdgeA = null ; Edge edgeAB = a . findEdge ( b ) ; double distAB = a . distance ( b ) ; if ( edgeAB == null ) { return null ; // TODO BUG! FIX! } for ( int i = 0 ; i < a . edges . size ; i ++ ) { Edge edgeA = a . edges . get ( i ) ; NodeInfo aa = a . edges . get ( i ) . target ; if ( aa . marked ) continue ; for ( int j = 0 ; j < b . edges . size ; j ++ ) { Edge edgeB = b . edges . get ( j ) ; NodeInfo bb = b . edges . get ( j ) . target ; if ( bb . marked ) continue ; if ( aa == bb ) { // System.out.println("center "+aa.ellipse.center); if ( checkSide && UtilAngle . distanceCW ( edgeAB . angle , edgeA . angle ) > Math . PI * 0.75 ) continue ; double angle = UtilAngle . dist ( edgeA . angle , edgeB . angle ) ; if ( angle < 0.3 ) continue ; double da = EllipsesIntoClusters . axisAdjustedDistanceSq ( a . ellipse , aa . ellipse ) ; double db = EllipsesIntoClusters . axisAdjustedDistanceSq ( b . ellipse , aa . ellipse ) ; da = Math . sqrt ( da ) ; db = Math . sqrt ( db ) ; // see if they are approximately the same distance double diffRatio = Math . abs ( da - db ) / Math . max ( da , db ) ; if ( diffRatio > 0.3 ) continue ; // TODO reject if too far double d = ( da + db ) / distAB + 0.1 * angle ; if ( d < bestScore ) { bestScore = d ; bestEdgeA = a . edges . get ( i ) ; } break ; } } } return bestEdgeA ; }
Finds the closest that is the same distance from the two nodes and part of an approximate equilateral triangle
476
21
27,116
static NodeInfo selectClosestSide ( NodeInfo a , NodeInfo b ) { double ratio = 1.7321 ; NodeInfo best = null ; double bestDistance = Double . MAX_VALUE ; Edge bestEdgeA = null ; Edge bestEdgeB = null ; for ( int i = 0 ; i < a . edges . size ; i ++ ) { NodeInfo aa = a . edges . get ( i ) . target ; if ( aa . marked ) continue ; for ( int j = 0 ; j < b . edges . size ; j ++ ) { NodeInfo bb = b . edges . get ( j ) . target ; if ( bb . marked ) continue ; if ( aa == bb ) { double da = EllipsesIntoClusters . axisAdjustedDistanceSq ( a . ellipse , aa . ellipse ) ; double db = EllipsesIntoClusters . axisAdjustedDistanceSq ( b . ellipse , aa . ellipse ) ; da = Math . sqrt ( da ) ; db = Math . sqrt ( db ) ; double max , min ; if ( da > db ) { max = da ; min = db ; } else { max = db ; min = da ; } // see how much it deviates from the ideal length with no distortion double diffRatio = Math . abs ( max - min * ratio ) / max ; if ( diffRatio > 0.25 ) continue ; // TODO reject if too far double d = da + db ; if ( d < bestDistance ) { bestDistance = d ; best = aa ; bestEdgeA = a . edges . get ( i ) ; bestEdgeB = b . edges . get ( j ) ; } break ; } } } // check the angles if ( best != null ) { double angleA = UtilAngle . distanceCW ( bestEdgeA . angle , bestEdgeB . angle ) ; if ( angleA < Math . PI * 0.25 ) // expected with zero distortion is 30 degrees return best ; else return null ; } return null ; }
Selects the closest node with the assumption that it's along the side of the grid.
446
18
27,117
/**
 * Conversion from RGB to YUV using the same equations as Intel IPP.
 *
 * @param r red channel value
 * @param g green channel value
 * @param b blue channel value
 * @param yuv (Output) storage of length >= 3 for the Y, U, V values
 */
public static void rgbToYuv( double r, double g, double b, double yuv[] ) {
	// luma is a weighted sum of the three channels
	double y = 0.299*r + 0.587*g + 0.114*b;

	yuv[0] = y;
	// chroma components are scaled differences from luma
	yuv[1] = 0.492*(b - y);
	yuv[2] = 0.877*(r - y);
}
Conversion from RGB to YUV using same equations as Intel IPP .
84
15
27,118
/**
 * Conversion from YUV to RGB using the same equations as Intel IPP.
 *
 * @param y luma value
 * @param u chroma U value
 * @param v chroma V value
 * @param rgb (Output) storage of length >= 3 for the R, G, B values
 */
public static void yuvToRgb( double y, double u, double v, double rgb[] ) {
	// inverse of the rgbToYuv weighting
	double red = y + 1.13983*v;
	double green = y - 0.39465*u - 0.58060*v;
	double blue = y + 2.032*u;

	rgb[0] = red;
	rgb[1] = green;
	rgb[2] = blue;
}
Conversion from YUV to RGB using same equations as Intel IPP .
74
15
27,119
/**
 * Estimates the trifocal tensor given the set of observations.
 *
 * @param observations Associated triples across three views. Must contain at least 7.
 * @param solution (Output) The estimated trifocal tensor in unnormalized coordinates.
 * @return true (the method always succeeds once the precondition is met)
 * @throws IllegalArgumentException if fewer than 7 observations are provided
 */
public boolean process( List<AssociatedTriple> observations, TrifocalTensor solution ) {
	if (observations.size() < 7)
		throw new IllegalArgumentException(
				"At least 7 correspondences must be provided. Found " + observations.size());

	// compute normalization to reduce numerical errors
	LowLevelMultiViewOps.computeNormalization(observations, N1, N2, N3);

	// compute solution in normalized pixel coordinates
	createLinearSystem(observations);

	// solve for the trifocal tensor
	solveLinearSystem();

	// enforce geometric constraints to improve solution
	extractEpipoles.setTensor(solutionN);
	extractEpipoles.extractEpipoles(e2, e3);
	enforce.process(e2, e3, A);
	enforce.extractSolution(solutionN);

	// undo normalization
	removeNormalization(solution);

	return true;
}
Estimates the trifocal tensor given the set of observations
192
13
27,120
/**
 * Constructs the linear system matrix from the 3-point constraint with linearly
 * dependent rows removed. Four rows are allocated per observation.
 *
 * NOTE(review): only columns 0-2 are filled per observation here; presumably the
 * remaining insert() calls were elided from this snippet — confirm against the full source.
 *
 * @param observations Associated triples in (unnormalized) pixel coordinates.
 */
protected void createLinearSystem( List<AssociatedTriple> observations ) {
	int N = observations.size();

	A.reshape(4*N, 27);
	A.zero();

	for (int i = 0; i < N; i++) {
		AssociatedTriple t = observations.get(i);

		// apply each view's normalization transform to its observation
		N1.apply(t.p1, p1_norm);
		N2.apply(t.p2, p2_norm);
		N3.apply(t.p3, p3_norm);

		insert(i, 0, p1_norm.x); // tensor 1
		insert(i, 1, p1_norm.y); // tensor 2
		insert(i, 2, 1);         // tensor 3
	}
}
Constructs the linear matrix that describes the 3-point constraint with linearly dependent rows removed
169
18
27,121
/**
 * Computes the null space of the linear system to find the trifocal tensor.
 *
 * @return true if the SVD decomposition succeeded
 */
protected boolean solveLinearSystem() {
	if (!svdNull.decompose(A))
		return false;

	// the null vector of A is the vectorized trifocal tensor
	SingularOps_DDRM.nullVector(svdNull, true, vectorizedSolution);
	solutionN.convertFrom(vectorizedSolution);
	return true;
}
Computes the null space of the linear system to find the trifocal tensor
62
17
27,122
/**
 * Translates the trifocal tensor back into the regular (unnormalized) coordinate system.
 *
 * Computes T_i[j][k] = sum over r,s,t of N1[r][i] * N2inv[j][s] * N3inv[k][t] * TN_r[s][t].
 *
 * @param solution (Output) Tensor written in unnormalized coordinates.
 */
protected void removeNormalization( TrifocalTensor solution ) {
	DMatrixRMaj N2_inv = N2.matrixInv();
	DMatrixRMaj N3_inv = N3.matrixInv();
	DMatrixRMaj N1 = this.N1.matrix();

	for (int i = 0; i < 3; i++) {
		DMatrixRMaj T = solution.getT(i);

		for (int j = 0; j < 3; j++) {
			for (int k = 0; k < 3; k++) {
				// accumulate the triple contraction for this output element
				double sum = 0;

				for (int r = 0; r < 3; r++) {
					double n1 = N1.get(r, i);
					DMatrixRMaj TN = solutionN.getT(r);
					for (int s = 0; s < 3; s++) {
						double n2 = N2_inv.get(j, s);
						for (int t = 0; t < 3; t++) {
							sum += n1*n2*N3_inv.get(k, t)*TN.get(s, t);
						}
					}
				}
				T.set(j, k, sum);
			}
		}
	}
}
Translates the trifocal tensor back into regular coordinate system
270
14
27,123
/**
 * Returns the average tangential derivative along the line segment from a to b. The derivative
 * is computed in the direction of the tangent: a positive step in the tangent direction yields
 * a positive value. If all samples fall outside the image then zero is returned.
 *
 * @param a Start of the line segment.
 * @param b End of the line segment.
 * @param tanX Tangent offset, x component.
 * @param tanY Tangent offset, y component.
 * @return averageUp - averageDown across all samples inside the image, or 0 if none were inside
 */
public double computeAverageDerivative( Point2D_F64 a, Point2D_F64 b, double tanX, double tanY ) {
	samplesInside = 0;
	averageUp = averageDown = 0;

	for (int i = 0; i < numSamples; i++) {
		// interpolate a sample point along the segment
		double x = (b.x - a.x)*i/(numSamples - 1) + a.x;
		double y = (b.y - a.y)*i/(numSamples - 1) + a.y;

		// point offset in the +tangent direction; skip samples that leave the image
		double x0 = x + tanX;
		double y0 = y + tanY;
		if (!BoofMiscOps.checkInside(integralImage.getWidth(), integralImage.getHeight(), x0, y0))
			continue;

		// point offset in the -tangent direction
		double x1 = x - tanX;
		double y1 = y - tanY;
		if (!BoofMiscOps.checkInside(integralImage.getWidth(), integralImage.getHeight(), x1, y1))
			continue;

		samplesInside++;

		double up = integral.compute(x, y, x0, y0);
		double down = integral.compute(x, y, x1, y1);

		// don't take the abs here and require that a high score involves it being entirely black or white around
		// the edge. Otherwise a random image would score high
		averageUp += up;
		averageDown += down;
	}

	if (samplesInside == 0)
		return 0;

	averageUp /= samplesInside;
	averageDown /= samplesInside;

	return averageUp - averageDown;
}
Returns average tangential derivative along the line segment . Derivative is computed in direction of tangent . A positive step in the tangent direction will have a positive value . If all samples go outside the image then zero is returned .
342
47
27,124
/**
 * Conversion from 8-bit RGB into XYZ. 8-bit means a range of 0 to 255 per channel.
 *
 * @param r red channel, 0 to 255
 * @param g green channel, 0 to 255
 * @param b blue channel, 0 to 255
 * @param xyz (Output) storage for the XYZ values
 */
public static void rgbToXyz( int r, int g, int b, double xyz[] ) {
	// normalize the 8-bit values to [0,1] then delegate to the sRGB conversion
	double sr = r/255.0;
	double sg = g/255.0;
	double sb = b/255.0;
	srgbToXyz(sr, sg, sb, xyz);
}
Conversion from 8 - bit RGB into XYZ . 8 - bit = range of 0 to 255 .
53
21
27,125
/**
 * Configures the camera's intrinsic and extrinsic parameters. Can be called at any time.
 *
 * @param intrinsic Camera intrinsic parameters.
 * @param planeToCamera Transform from the plane to the camera frame.
 * @throws IllegalArgumentException if no reasonable overhead map can be found
 */
public void configureCamera( CameraPinholeBrown intrinsic, Se3_F64 planeToCamera ) {
	this.planeToCamera = planeToCamera;

	if (!selectOverhead.process(intrinsic, planeToCamera))
		throw new IllegalArgumentException("Can't find a reasonable overhead map. Can the camera view the plane?");

	overhead.centerX = selectOverhead.getCenterX();
	overhead.centerY = selectOverhead.getCenterY();

	createOverhead.configure(intrinsic, planeToCamera, overhead.centerX, overhead.centerY,
			overhead.cellSize, selectOverhead.getOverheadWidth(), selectOverhead.getOverheadHeight());

	// used to counter act offset in overhead image
	origToMap.set(overhead.centerX, overhead.centerY, 0);
	mapToOrigin.set(-overhead.centerX, -overhead.centerY, 0);

	// fill it so there aren't any artifacts in the left over
	overhead.image.reshape(selectOverhead.getOverheadWidth(), selectOverhead.getOverheadHeight());
	GImageMiscOps.fill(overhead.image, 0);
}
Configures the camera's intrinsic and extrinsic parameters. Can be called at any time.
260
18
27,126
/**
 * Returns the 3D motion from the world frame to the current camera frame,
 * constructed from the internally tracked 2D plane motion.
 *
 * @return transform from world to current camera frame
 */
public Se3_F64 getWorldToCurr3D() {
	// 2D to 3D coordinates
	worldToCurr3D.getT().set(-worldToCurr2D.T.y, 0, worldToCurr2D.T.x);

	DMatrixRMaj R = worldToCurr3D.getR();

	// set rotation around Y axis.
	// Transpose the 2D transform since the rotation are pointing in opposite directions
	R.unsafe_set(0, 0, worldToCurr2D.c);
	R.unsafe_set(0, 2, -worldToCurr2D.s);
	R.unsafe_set(1, 1, 1);
	R.unsafe_set(2, 0, worldToCurr2D.s);
	R.unsafe_set(2, 2, worldToCurr2D.c);

	// compose with the plane-to-camera extrinsics to get the camera-frame motion
	worldToCurr3D.concat(planeToCamera, worldToCurrCam3D);

	return worldToCurrCam3D;
}
3D motion .
231
4
27,127
/**
 * Detects ellipses inside the binary image and refines their edges using the gray image.
 * Detections which fail the edge-intensity check are rejected.
 *
 * @param gray Gray image used for intensity checks and refinement.
 * @param binary Binary image the initial ellipse detection runs on.
 */
public void process( T gray, GrayU8 binary ) {
	results.reset();
	ellipseDetector.process(binary);
	if (ellipseRefiner != null)
		ellipseRefiner.setImage(gray);
	intensityCheck.setImage(gray);

	List<BinaryEllipseDetectorPixel.Found> found = ellipseDetector.getFound();

	for (BinaryEllipseDetectorPixel.Found f : found) {
		// reject candidates whose initial fit lacks a strong enough edge
		if (!intensityCheck.process(f.ellipse)) {
			if (verbose)
				System.out.println("Rejecting ellipse. Initial fit didn't have intense enough edge");
			continue;
		}

		EllipseInfo r = results.grow();
		r.contour = f.contour;

		if (ellipseRefiner != null) {
			if (!ellipseRefiner.process(f.ellipse, r.ellipse)) {
				if (verbose)
					System.out.println("Rejecting ellipse. Refined fit didn't have an intense enough edge");
				results.removeTail();
				continue;
			} else if (!intensityCheck.process(f.ellipse)) {
				// NOTE(review): this re-checks the pre-refinement ellipse (f.ellipse);
				// presumably the refined r.ellipse was intended — confirm
				if (verbose)
					System.out.println("Rejecting ellipse. Refined fit didn't have an intense enough edge");
				// BUG FIX: previously this rejection path left the grown entry in 'results',
				// leaking a partially-initialized result; remove it like the branch above does
				results.removeTail();
				continue;
			}
		} else {
			r.ellipse.set(f.ellipse);
		}

		r.averageInside = intensityCheck.averageInside;
		r.averageOutside = intensityCheck.averageOutside;
	}
}
Detects ellipses inside the binary image and refines the edges for all detections inside the gray image
347
22
27,128
/**
 * If auto refine is turned off, an ellipse can be refined after the fact using this function,
 * provided that a refinement algorithm was passed in to the constructor.
 *
 * @param ellipse Ellipse refined in place.
 * @return true if refinement succeeded
 * @throws IllegalArgumentException if auto refine is enabled or no refiner was provided
 */
public boolean refine( EllipseRotated_F64 ellipse ) {
	if (autoRefine)
		throw new IllegalArgumentException("Autorefine is true, no need to refine again");
	if (ellipseRefiner == null)
		throw new IllegalArgumentException("Refiner has not been passed in");

	// refine in place; the result of process() is the success flag
	return ellipseRefiner.process(ellipse, ellipse);
}
If auto refine is turned off an ellipse can be refined after the fact using this function provided that the refinement algorithm was passed in to the constructor
105
30
27,129
/**
 * Renders positive and negative values as two different colors
 * (positive in the first channel, negative in the second — presumably red/green
 * in RGBA byte order; confirm against Bitmap config).
 *
 * @param input Image being visualized.
 * @param maxAbsValue Scale factor; if negative it is computed from the image.
 * @param output Bitmap the result is written into.
 * @param storage Optional working array; allocated if null.
 */
public static void colorizeSign( GrayF32 input, float maxAbsValue, Bitmap output, byte[] storage ) {
	shapeShape(input, output);

	if (storage == null)
		storage = declareStorage(output, null);

	if (maxAbsValue < 0)
		maxAbsValue = ImageStatistics.maxAbs(input);

	int indexDst = 0;
	for (int y = 0; y < input.height; y++) {
		int indexSrc = input.startIndex + y*input.stride;
		for (int x = 0; x < input.width; x++) {
			float value = input.data[indexSrc++];
			if (value > 0) {
				storage[indexDst++] = (byte)(255f*value/maxAbsValue);
				storage[indexDst++] = 0;
				storage[indexDst++] = 0;
			} else {
				storage[indexDst++] = 0;
				storage[indexDst++] = (byte)(-255f*value/maxAbsValue);
				storage[indexDst++] = 0;
			}
			// alpha channel, fully opaque
			storage[indexDst++] = (byte)0xFF;
		}
	}

	output.copyPixelsFromBuffer(ByteBuffer.wrap(storage));
}
Renders positive and negative values as two different colors .
273
11
27,130
/**
 * Renders the image as a gray scale using its magnitude (absolute value).
 *
 * @param input Image being visualized.
 * @param maxAbsValue Scale factor; if negative it is computed from the image.
 * @param output Bitmap the result is written into.
 * @param storage Optional working array; allocated if null.
 */
public static void grayMagnitude( GrayS32 input, int maxAbsValue, Bitmap output, byte[] storage ) {
	shapeShape(input, output);

	if (storage == null)
		storage = declareStorage(output, null);

	if (maxAbsValue < 0)
		maxAbsValue = ImageStatistics.maxAbs(input);

	int indexDst = 0;
	for (int y = 0; y < input.height; y++) {
		int indexSrc = input.startIndex + y*input.stride;
		for (int x = 0; x < input.width; x++) {
			// same gray value in all three color channels
			byte gray = (byte)(255*Math.abs(input.data[indexSrc++])/maxAbsValue);

			storage[indexDst++] = gray;
			storage[indexDst++] = gray;
			storage[indexDst++] = gray;
			// alpha channel, fully opaque
			storage[indexDst++] = (byte)0xFF;
		}
	}

	output.copyPixelsFromBuffer(ByteBuffer.wrap(storage));
}
Renders the image using its gray magnitude
223
8
27,131
/**
 * Colorizes a disparity image. Values above the valid range are rendered using invalidColor.
 *
 * @param disparity Disparity image.
 * @param minValue Minimum disparity value.
 * @param maxValue Maximum disparity value.
 * @param invalidColor Packed 0xRRGGBB color used for invalid disparities.
 * @param output Bitmap the result is written into.
 * @param storage Optional working array; allocated if null.
 */
public static void disparity( GrayI disparity, int minValue, int maxValue, int invalidColor,
		Bitmap output, byte[] storage ) {
	shapeShape(disparity, output);

	if (storage == null)
		storage = declareStorage(output, null);

	int range = maxValue - minValue;

	int indexDst = 0;
	for (int y = 0; y < disparity.height; y++) {
		for (int x = 0; x < disparity.width; x++) {
			int v = disparity.unsafe_get(x, y);
			int r, g, b;

			if (v > range) {
				// disparity beyond the valid range: unpack the invalid color
				r = (invalidColor >> 16) & 0xFF;
				g = (invalidColor >> 8) & 0xFF;
				b = (invalidColor) & 0xFF;
			} else {
				g = 0;
				if (v == 0) {
					r = b = 0;
				} else {
					// NOTE(review): scales by maxValue rather than range and ignores minValue —
					// confirm this is the intended mapping
					r = 255*v/maxValue;
					b = 255*(maxValue - v)/maxValue;
				}
			}

			storage[indexDst++] = (byte)r;
			storage[indexDst++] = (byte)g;
			storage[indexDst++] = (byte)b;
			// alpha channel, fully opaque
			storage[indexDst++] = (byte)0xFF;
		}
	}

	output.copyPixelsFromBuffer(ByteBuffer.wrap(storage));
}
Colorizes a disparity image .
292
6
27,132
/**
 * Draws each contour using a single color into an ARGB_8888 bitmap.
 *
 * @param contours Edge contours to draw.
 * @param color Packed 0xRRGGBB color.
 * @param output Bitmap drawn into. Must be ARGB_8888.
 * @param storage Optional working array; allocated if null, otherwise zeroed first.
 * @throws IllegalArgumentException if the bitmap config is not ARGB_8888
 */
public static void drawEdgeContours( List<EdgeContour> contours, int color, Bitmap output, byte[] storage ) {
	if (output.getConfig() != Bitmap.Config.ARGB_8888)
		throw new IllegalArgumentException("Only ARGB_8888 is supported");

	if (storage == null)
		storage = declareStorage(output, null);
	else
		Arrays.fill(storage, (byte)0);

	// unpack the 0xRRGGBB color
	byte r = (byte)((color >> 16) & 0xFF);
	byte g = (byte)((color >> 8) & 0xFF);
	byte b = (byte)(color);

	for (int i = 0; i < contours.size(); i++) {
		EdgeContour e = contours.get(i);

		for (int j = 0; j < e.segments.size(); j++) {
			EdgeSegment s = e.segments.get(j);

			for (int k = 0; k < s.points.size(); k++) {
				Point2D_I32 p = s.points.get(k);

				// 4 bytes per pixel, row-major
				int index = p.y*4*output.getWidth() + p.x*4;
				// NOTE(review): bytes are written b,g,r here while other renderers in this
				// group write r,g,b first — confirm the channel order is intended
				storage[index++] = b;
				storage[index++] = g;
				storage[index++] = r;
				storage[index] = (byte)0xFF;
			}
		}
	}

	output.copyPixelsFromBuffer(ByteBuffer.wrap(storage));
}
Draws each contour using a single color .
330
10
27,133
/**
 * Clips and scales the input image into the output image. When clipping is
 * enabled, the input is first cropped to the output's aspect ratio; otherwise
 * each axis is scaled independently, which distorts the image.
 *
 * @param input source image
 * @param output destination image, written in place
 */
public void massage( T input, T output ) {
	if (clip) {
		T inputAdjusted = clipInput(input, output);

		// configure a simple change in scale for both axises
		// NOTE(review): the scale is computed from the ORIGINAL input dimensions,
		// not the clipped image (inputAdjusted) that is actually distorted below --
		// confirm this is intentional, otherwise the clipped image gets stretched
		transform.a11 = input.width/(float)output.width;
		transform.a22 = input.height/(float)output.height;

		// this change is automatically reflected in the distortion class. It is configured to cache nothing
		distort.apply(inputAdjusted, output);
	} else {
		// scale each axis independently. It will have the whole image but it will be distorted
		transform.a11 = input.width/(float)output.width;
		transform.a22 = input.height/(float)output.height;
		distort.apply(input, output);
	}
}
Clips and scales the input image as necessary
163
15
27,134
T clipInput ( T input , T output ) { double ratioInput = input . width / ( double ) input . height ; double ratioOutput = output . width / ( double ) output . height ; T a = input ; if ( ratioInput > ratioOutput ) { // clip the width int width = input . height * output . width / output . height ; int x0 = ( input . width - width ) / 2 ; int x1 = x0 + width ; clipped = input . subimage ( x0 , 0 , x1 , input . height , clipped ) ; a = clipped ; } else if ( ratioInput < ratioOutput ) { // clip the height int height = input . width * output . height / output . width ; int y0 = ( input . height - height ) / 2 ; int y1 = y0 + height ; clipped = input . subimage ( 0 , y0 , input . width , y1 , clipped ) ; a = clipped ; } return a ; }
Clip the input image to ensure a constant aspect ratio
207
11
27,135
public static int nextPow2 ( int x ) { if ( x < 1 ) throw new IllegalArgumentException ( "x must be greater or equal 1" ) ; if ( ( x & ( x - 1 ) ) == 0 ) { if ( x == 1 ) return 2 ; return x ; // x is already a power-of-two number } x |= ( x >>> 1 ) ; x |= ( x >>> 2 ) ; x |= ( x >>> 4 ) ; x |= ( x >>> 8 ) ; x |= ( x >>> 16 ) ; x |= ( x >>> 32 ) ; return x + 1 ; }
Returns the closest power - of - two number greater than or equal to x .
135
16
27,136
/**
 * Checks that an image and its Fourier transform are compatible: they must have
 * identical width and height, and the transform must have exactly two bands
 * (real and imaginary components interleaved).
 *
 * @param image spatial-domain image
 * @param transform frequency-domain image being validated
 * @throws IllegalArgumentException if shapes differ or the band count is not 2
 */
public static void checkImageArguments( ImageBase image, ImageInterleaved transform ) {
	InputSanityCheck.checkSameShape(image, transform);
	if (2 != transform.getNumBands())
		throw new IllegalArgumentException("The transform must have two bands");
}
Checks to see if the image and its transform are appropriately sized . The transform must have the same width and height as the image and contain exactly two bands .
64
29
27,137
/**
 * Robustly estimates the camera motion between two views using RANSAC with an
 * essential-matrix model on associated points in normalized image coordinates.
 *
 * @param intrinsic camera intrinsics, used for both views
 * @param matchedNorm associated point pairs in normalized image coordinates
 * @param inliers (output) the RANSAC inlier set is appended here for debugging
 * @return estimated motion from view 1 to view 2
 * @throws RuntimeException if RANSAC fails to find a model
 */
public static Se3_F64 estimateCameraMotion( CameraPinholeBrown intrinsic, List<AssociatedPair> matchedNorm, List<AssociatedPair> inliers ) {
	// 200 RANSAC iterations with a 0.5 inlier threshold
	ModelMatcherMultiview<Se3_F64, AssociatedPair> epipolarMotion =
			FactoryMultiViewRobust.baselineRansac(new ConfigEssential(), new ConfigRansac(200, 0.5));
	epipolarMotion.setIntrinsic(0, intrinsic);
	epipolarMotion.setIntrinsic(1, intrinsic);

	if (!epipolarMotion.process(matchedNorm))
		throw new RuntimeException("Motion estimation failed");

	// save inlier set for debugging purposes
	inliers.addAll(epipolarMotion.getMatchSet());

	return epipolarMotion.getModelParameters();
}
Estimates the camera motion robustly using RANSAC and a set of associated points .
183
18
27,138
/**
 * Converts a set of associated point features from (distorted) pixel coordinates
 * into normalized image coordinates using the camera's intrinsic parameters.
 *
 * @param matchedFeatures associated pairs in pixel coordinates
 * @param intrinsic camera intrinsics with lens-distortion model
 * @return new list of associated pairs in normalized image coordinates
 */
public static List<AssociatedPair> convertToNormalizedCoordinates( List<AssociatedPair> matchedFeatures, CameraPinholeBrown intrinsic ) {
	// pixel -> normalized transform that also removes lens distortion
	Point2Transform2_F64 p_to_n = LensDistortionFactory.narrow(intrinsic).undistort_F64(true, false);

	List<AssociatedPair> calibratedFeatures = new ArrayList<>();
	for (int i = 0; i < matchedFeatures.size(); i++) {
		AssociatedPair pixel = matchedFeatures.get(i);
		AssociatedPair norm = new AssociatedPair();
		p_to_n.compute(pixel.p1.x, pixel.p1.y, norm.p1);
		p_to_n.compute(pixel.p2.x, pixel.p2.y, norm.p2);
		calibratedFeatures.add(norm);
	}
	return calibratedFeatures;
}
Convert a set of associated point features from pixel coordinates into normalized image coordinates .
177
16
27,139
/**
 * Removes lens distortion and rectifies a stereo image pair so that epipolar
 * lines become horizontal. Also outputs the rectified calibration matrix,
 * the rectified rotation, and a mask of valid rectified pixels.
 *
 * @param distortedLeft raw left image
 * @param distortedRight raw right image
 * @param leftToRight transform from the left to the right camera
 * @param intrinsicLeft left camera intrinsics
 * @param intrinsicRight right camera intrinsics
 * @param rectifiedLeft (output) rectified left image
 * @param rectifiedRight (output) rectified right image
 * @param rectifiedMask (output) mask of valid pixels in the rectified left image
 * @param rectifiedK (output) calibration matrix shared by both rectified views
 * @param rectifiedR (output) rotation applied by rectification
 */
public static <T extends ImageBase<T>> void rectifyImages( T distortedLeft, T distortedRight, Se3_F64 leftToRight,
		CameraPinholeBrown intrinsicLeft, CameraPinholeBrown intrinsicRight,
		T rectifiedLeft, T rectifiedRight, GrayU8 rectifiedMask,
		DMatrixRMaj rectifiedK, DMatrixRMaj rectifiedR ) {
	RectifyCalibrated rectifyAlg = RectifyImageOps.createCalibrated();

	// original camera calibration matrices
	DMatrixRMaj K1 = PerspectiveOps.pinholeToMatrix(intrinsicLeft, (DMatrixRMaj)null);
	DMatrixRMaj K2 = PerspectiveOps.pinholeToMatrix(intrinsicRight, (DMatrixRMaj)null);

	rectifyAlg.process(K1, new Se3_F64(), K2, leftToRight);

	// rectification matrix for each image
	DMatrixRMaj rect1 = rectifyAlg.getRect1();
	DMatrixRMaj rect2 = rectifyAlg.getRect2();
	rectifiedR.set(rectifyAlg.getRectifiedRotation());

	// New calibration matrix,
	rectifiedK.set(rectifyAlg.getCalibrationMatrix());

	// Adjust the rectification to make the view area more useful
	RectifyImageOps.fullViewLeft(intrinsicLeft, rect1, rect2, rectifiedK);

	// undistorted and rectify images
	FMatrixRMaj rect1_F32 = new FMatrixRMaj(3, 3);
	FMatrixRMaj rect2_F32 = new FMatrixRMaj(3, 3);
	ConvertMatrixData.convert(rect1, rect1_F32);
	ConvertMatrixData.convert(rect2, rect2_F32);

	// Extending the image prevents a harsh edge reducing false matches at the image border
	// SKIP is another option, possibly a tinny bit faster, but has a harsh edge which will need to be filtered
	ImageDistort<T, T> distortLeft =
			RectifyImageOps.rectifyImage(intrinsicLeft, rect1_F32, BorderType.EXTENDED, distortedLeft.getImageType());
	ImageDistort<T, T> distortRight =
			RectifyImageOps.rectifyImage(intrinsicRight, rect2_F32, BorderType.EXTENDED, distortedRight.getImageType());

	// only the left image gets a validity mask here; the right call omits it
	distortLeft.apply(distortedLeft, rectifiedLeft, rectifiedMask);
	distortRight.apply(distortedRight, rectifiedRight);
}
Remove lens distortion and rectify stereo images
569
8
27,140
/**
 * Draws the inlier associations for debugging. The points are stored in
 * normalized image coordinates, so they are first converted back into
 * (distorted) pixel coordinates before rendering.
 *
 * @param left left input image
 * @param right right input image
 * @param intrinsic camera intrinsics used for the normalized -> pixel transform
 * @param normalized inlier pairs in normalized image coordinates
 */
public static void drawInliers( BufferedImage left, BufferedImage right, CameraPinholeBrown intrinsic, List<AssociatedPair> normalized ) {
	// normalized -> pixel transform, re-applying lens distortion
	Point2Transform2_F64 n_to_p = LensDistortionFactory.narrow(intrinsic).distort_F64(false, true);

	List<AssociatedPair> pixels = new ArrayList<>();
	for (AssociatedPair n : normalized) {
		AssociatedPair p = new AssociatedPair();
		n_to_p.compute(n.p1.x, n.p1.y, p.p1);
		n_to_p.compute(n.p2.x, n.p2.y, p.p2);
		pixels.add(p);
	}

	// display the results
	AssociationPanel panel = new AssociationPanel(20);
	panel.setAssociation(pixels);
	panel.setImages(left, right);
	ShowImages.showWindow(panel, "Inlier Features", true);
}
Draw inliers for debugging purposes . Need to convert from normalized to pixel coordinates .
220
17
27,141
/**
 * Returns the squared Euclidean distance between the two descriptors.
 * Both descriptors are assumed to have the same length.
 *
 * @param a first descriptor
 * @param b second descriptor
 * @return sum of squared element-wise differences
 */
public static double euclideanSq( TupleDesc_F64 a, TupleDesc_F64 b ) {
	double sum = 0;
	// accumulate in the same forward order as before for identical rounding
	for (int i = 0; i < a.value.length; i++) {
		double diff = a.value[i] - b.value[i];
		sum += diff*diff;
	}
	return sum;
}
Returns the Euclidean distance squared between the two descriptors .
85
13
27,142
/**
 * One SOR (successive over-relaxation) update of the optical flow at a border
 * pixel, using the bounds-checked neighborhood average A_safe.
 *
 * @param image1 first image in the sequence
 * @param x pixel x-coordinate
 * @param y pixel y-coordinate
 * @param pixelIndex raw array index of the pixel
 * @return squared magnitude of the flow change at this pixel (convergence metric)
 */
private float iterationSorSafe( GrayF32 image1, int x, int y, int pixelIndex ) {
	float w = SOR_RELAXATION;

	float uf; // updated flow along x
	float vf; // updated flow along y

	// flow at the linearization point (start of this outer iteration)
	float ui = initFlowX.data[pixelIndex];
	float vi = initFlowY.data[pixelIndex];

	// current flow estimate
	float u = flowX.data[pixelIndex];
	float v = flowY.data[pixelIndex];

	float I1 = image1.data[pixelIndex];       // intensity in image 1
	float I2 = warpImage2.data[pixelIndex];   // warped image 2 intensity
	float I2x = warpDeriv2X.data[pixelIndex]; // warped image 2 x-derivative
	float I2y = warpDeriv2Y.data[pixelIndex]; // warped image 2 y-derivative

	// weighted neighborhood averages (equation 25), safe at the image border
	float AU = A_safe(x, y, flowX);
	float AV = A_safe(x, y, flowY);

	// SOR update: blend the old value with the Gauss-Seidel update using relaxation w.
	// Note that vf uses the freshly computed uf (Gauss-Seidel ordering matters).
	flowX.data[pixelIndex] = uf = (1 - w)*u + w*((I1 - I2 + I2x*ui - I2y*(v - vi))*I2x + alpha2*AU)/(I2x*I2x + alpha2);
	flowY.data[pixelIndex] = vf = (1 - w)*v + w*((I1 - I2 + I2y*vi - I2x*(uf - ui))*I2y + alpha2*AV)/(I2y*I2y + alpha2);

	return (uf - u)*(uf - u) + (vf - v)*(vf - v);
}
SOR iteration for border pixels
358
6
27,143
/**
 * Weighted average of the 8-neighborhood of a flow value (equation 25).
 * Safe version: out-of-bounds neighbors are clamped via safe().
 *
 * @param x pixel x-coordinate
 * @param y pixel y-coordinate
 * @param flow flow component image
 * @return 1/6 of the 4-connected neighbor sum plus 1/12 of the diagonal sum
 */
protected static float A_safe( int x, int y, GrayF32 flow ) {
	// 4-connected neighbors (same summation order as before, so results are bit-identical)
	float edges = safe(x - 1, y, flow) + safe(x + 1, y, flow)
			+ safe(x, y - 1, flow) + safe(x, y + 1, flow);
	// diagonal neighbors
	float corners = safe(x - 1, y - 1, flow) + safe(x + 1, y - 1, flow)
			+ safe(x - 1, y + 1, flow) + safe(x + 1, y + 1, flow);

	return (1.0f/6.0f)*edges + (1.0f/12.0f)*corners;
}
See equation 25 . Safe version
201
6
27,144
/**
 * Weighted average of the 8-neighborhood of a flow value (equation 25).
 * Fast unsafe version: assumes (x,y) is at least one pixel inside the border.
 *
 * @param x pixel x-coordinate (interior)
 * @param y pixel y-coordinate (interior)
 * @param flow flow component image
 * @return 1/6 of the 4-connected neighbor sum plus 1/12 of the diagonal sum
 */
protected static float A( int x, int y, GrayF32 flow ) {
	final int index = flow.getIndex(x, y);
	final float[] d = flow.data;
	final int s = flow.stride;

	// 4-connected neighbors, summed in the original order for identical rounding
	float edges = d[index - 1] + d[index + 1] + d[index - s] + d[index + s];
	// diagonal neighbors
	float corners = d[index - 1 - s] + d[index + 1 - s] + d[index - 1 + s] + d[index + 1 + s];

	return (1.0f/6.0f)*edges + (1.0f/12.0f)*corners;
}
See equation 25 . Fast unsafe version
208
7
27,145
/**
 * Reads a pixel, clamping the coordinate to the nearest pixel inside the image
 * when it lies outside the bounds.
 *
 * @param x pixel x-coordinate, may be out of bounds
 * @param y pixel y-coordinate, may be out of bounds
 * @param image image being sampled
 * @return value of the nearest in-bounds pixel
 */
protected static float safe( int x, int y, GrayF32 image ) {
	int xx = Math.min(Math.max(x, 0), image.width - 1);
	int yy = Math.min(Math.max(y, 0), image.height - 1);
	return image.unsafe_get(xx, yy);
}
Ensures the sampled coordinate is inside the image . If it falls outside , it is clamped to the nearest pixel inside the image
84
22
27,146
/**
 * Performs a mean-shift search for a local intensity peak, starting centered at
 * the specified coordinates. Iterates until the center moves by less than
 * convergenceTol on both axes or maxIterations is reached. The result is left
 * in peakX/peakY.
 *
 * @param cx initial center x-coordinate
 * @param cy initial center y-coordinate
 */
public void search( float cx, float cy ) {
	peakX = cx;
	peakY = cy;
	setRegion(cx, cy);
	for (int i = 0; i < maxIterations; i++) {
		float total = 0;
		float sumX = 0, sumY = 0;

		int kernelIndex = 0;
		// see if it can use fast interpolation otherwise use the safer technique
		if (interpolate.isInFastBounds(x0, y0) && interpolate.isInFastBounds(x0 + width - 1, y0 + width - 1)) {
			for (int yy = 0; yy < width; yy++) {
				for (int xx = 0; xx < width; xx++) {
					float w = weights.weightIndex(kernelIndex++);
					// sample weighted by the kernel and the interpolated intensity
					float weight = w*interpolate.get_fast(x0 + xx, y0 + yy);
					total += weight;
					sumX += weight*(xx + x0);
					sumY += weight*(yy + y0);
				}
			}
		} else {
			// same accumulation, but with bounds-checked interpolation
			for (int yy = 0; yy < width; yy++) {
				for (int xx = 0; xx < width; xx++) {
					float w = weights.weightIndex(kernelIndex++);
					float weight = w*interpolate.get(x0 + xx, y0 + yy);
					total += weight;
					sumX += weight*(xx + x0);
					sumY += weight*(yy + y0);
				}
			}
		}

		// mean-shift step: move the region center to the weighted centroid
		cx = sumX/total;
		cy = sumY/total;
		setRegion(cx, cy);

		float dx = cx - peakX;
		float dy = cy - peakY;

		peakX = cx;
		peakY = cy;

		// converged when the center barely moved
		if (Math.abs(dx) < convergenceTol && Math.abs(dy) < convergenceTol) {
			break;
		}
	}
}
Performs a mean - shift search centered at the specified coordinates
407
12
27,147
/**
 * Updates the location of the rectangular bounding box so it is centered at
 * (cx, cy), shifting it back inside the image when it overlaps a border.
 * The left/top correction takes precedence, matching the original branch order.
 *
 * @param cx desired center x-coordinate
 * @param cy desired center y-coordinate
 */
protected void setRegion( float cx, float cy ) {
	x0 = cx - radius;
	y0 = cy - radius;

	// clamp to the image while preserving the original precedence:
	// a negative origin wins over an overflowing far edge
	x0 = x0 < 0 ? 0 : (x0 + width > image.width ? image.width - width : x0);
	y0 = y0 < 0 ? 0 : (y0 + width > image.height ? image.height - width : y0);
}
Updates the location of the rectangular bounding box
101
10
27,148
/**
 * Compares two ways of computing the image derivative: (1) Gaussian blur
 * followed by a difference operator, and (2) convolving directly with a
 * Gaussian-derivative kernel. Prints both results for comparison.
 */
public void gaussianDerivToDirectDeriv() {
	T blur = GeneralizedImageOps.createSingleBand(imageType, width, height);
	T blurDeriv = GeneralizedImageOps.createSingleBand(imageType, width, height);
	T gaussDeriv = GeneralizedImageOps.createSingleBand(imageType, width, height);

	BlurStorageFilter<T> funcBlur = FactoryBlurFilter.gaussian(ImageType.single(imageType), sigma, radius);
	ImageGradient<T, T> funcDeriv = FactoryDerivative.three(imageType, imageType);
	ImageGradient<T, T> funcGaussDeriv = FactoryDerivative.gaussian(sigma, radius, imageType, imageType);

	// method 1: blur the image, then differentiate
	funcBlur.process(input, blur);
	funcDeriv.process(blur, blurDeriv, derivY);

	// method 2: differentiate with a Gaussian-derivative kernel in one step
	funcGaussDeriv.process(input, gaussDeriv, derivY);

	printIntensity("Blur->Deriv", blurDeriv);
	printIntensity("Gauss Deriv", gaussDeriv);
}
Compares computing the image derivative by blurring then differentiating against using a Gaussian derivative kernel directly
253
4
27,149
/**
 * Sees if the specified word matches the name of any paper size,
 * ignoring case.
 *
 * @param word name to search for (case-insensitive)
 * @return the matching PaperSize, or null if nothing matches
 */
public static PaperSize lookup( String word ) {
	for (PaperSize paper : values) {
		// equalsIgnoreCase() is the idiomatic (and cheaper) test for
		// case-insensitive equality; compareToIgnoreCase(...) == 0 is equivalent
		if (paper.name.equalsIgnoreCase(word))
			return paper;
	}
	return null;
}
Sees if the specified word matches the name of any paper size .
46
17
27,150
/**
 * Performs a horizontal 1D convolution across the image. Borders are handled
 * as specified by the border parameter. Dispatches to the type-specific
 * ConvolveImage implementation based on the runtime input image type.
 *
 * @param kernel 1D kernel; must be float/integer to match the image type
 * @param input input image (gray, interleaved, or planar)
 * @param output output image; for U8 input either a 16-bit or 32-bit image
 * @param border rule for pixels outside the image
 * @throws IllegalArgumentException if the image type is not supported
 */
public static <In extends ImageBase<In>, Out extends ImageBase<Out>, K extends Kernel1D, B extends ImageBorder<In>>
void horizontal( K kernel, In input, Out output, B border ) {
	switch (input.getImageType().getFamily()) {
		case GRAY: {
			if (input instanceof GrayF32) {
				ConvolveImage.horizontal((Kernel1D_F32)kernel, (GrayF32)input, (GrayF32)output, (ImageBorder_F32)border);
			} else if (input instanceof GrayU8) {
				// U8 input can convolve into either a 16-bit or a 32-bit integer image
				if (GrayI16.class.isAssignableFrom(output.getClass()))
					ConvolveImage.horizontal((Kernel1D_S32)kernel, (GrayU8)input, (GrayI16)output, (ImageBorder_S32)border);
				else
					ConvolveImage.horizontal((Kernel1D_S32)kernel, (GrayU8)input, (GrayS32)output, (ImageBorder_S32)border);
			} else if (input instanceof GrayS16) {
				ConvolveImage.horizontal((Kernel1D_S32)kernel, (GrayS16)input, (GrayI16)output, (ImageBorder_S32)border);
			} else {
				throw new IllegalArgumentException("Unknown image type: " + input.getClass().getName());
			}
		} break;

		case INTERLEAVED: {
			if (input instanceof InterleavedF32) {
				ConvolveImage.horizontal((Kernel1D_F32)kernel, (InterleavedF32)input, (InterleavedF32)output, (ImageBorder_IL_F32)border);
			} else if (input instanceof InterleavedU8) {
				if (InterleavedI16.class.isAssignableFrom(output.getClass()))
					ConvolveImage.horizontal((Kernel1D_S32)kernel, (InterleavedU8)input, (InterleavedI16)output, (ImageBorder_IL_S32)border);
				else
					ConvolveImage.horizontal((Kernel1D_S32)kernel, (InterleavedU8)input, (InterleavedS32)output, (ImageBorder_IL_S32)border);
			} else if (input instanceof InterleavedS16) {
				// NOTE(review): output is cast to InterleavedU16 here, while the no-border
				// variant casts to InterleavedI16 -- confirm which cast is intended
				ConvolveImage.horizontal((Kernel1D_S32)kernel, (InterleavedS16)input, (InterleavedU16)output, (ImageBorder_IL_S32)border);
			} else {
				throw new IllegalArgumentException("Unknown image type: " + input.getClass().getName());
			}
		} break;

		case PLANAR: {
			// apply the convolution independently to every band
			Planar inp = (Planar)input;
			Planar outp = (Planar)output;
			for (int i = 0; i < inp.getNumBands(); i++) {
				horizontal(kernel, inp.getBand(i), outp.getBand(i), (ImageBorder)border);
			}
		} break;
	}
}
Performs a horizontal 1D convolution across the image . Borders are handled as specified by the border parameter .
694
22
27,151
/**
 * Performs a horizontal 1D convolution across the image. The horizontal border
 * is not processed. Dispatches to the type-specific ConvolveImageNoBorder
 * implementation based on the runtime input image type.
 *
 * @param kernel 1D kernel; must be float/integer to match the image type
 * @param input input image (gray, interleaved, or planar)
 * @param output output image; for U8 input either a 16-bit or 32-bit image
 * @param <In> input image type
 * @param <Out> output image type
 * @param <K> kernel type
 * @throws IllegalArgumentException if the image type or family is not supported
 */
public static <In extends ImageBase<In>, Out extends ImageBase<Out>, K extends Kernel1D>
void horizontal( K kernel, In input, Out output ) {
	switch (input.getImageType().getFamily()) {
		case GRAY: {
			if (input instanceof GrayF32) {
				ConvolveImageNoBorder.horizontal((Kernel1D_F32)kernel, (GrayF32)input, (GrayF32)output);
			} else if (input instanceof GrayU8) {
				// U8 input can convolve into either a 16-bit or a 32-bit integer image
				if (GrayI16.class.isAssignableFrom(output.getClass()))
					ConvolveImageNoBorder.horizontal((Kernel1D_S32)kernel, (GrayU8)input, (GrayI16)output);
				else
					ConvolveImageNoBorder.horizontal((Kernel1D_S32)kernel, (GrayU8)input, (GrayS32)output);
			} else if (input instanceof GrayS16) {
				ConvolveImageNoBorder.horizontal((Kernel1D_S32)kernel, (GrayS16)input, (GrayI16)output);
			} else {
				throw new IllegalArgumentException("Unknown image type: " + input.getClass().getName());
			}
		} break;

		case INTERLEAVED: {
			// BUG FIX: this branch previously tested "output instanceof InterleavedF32".
			// Every other branch dispatches on the INPUT type. Testing the output meant a
			// U8 input paired with an F32 output was cast to InterleavedF32 (ClassCastException),
			// and an F32 input with any other output type was wrongly rejected.
			if (input instanceof InterleavedF32) {
				ConvolveImageNoBorder.horizontal((Kernel1D_F32)kernel, (InterleavedF32)input, (InterleavedF32)output);
			} else if (input instanceof InterleavedU8) {
				if (InterleavedI16.class.isAssignableFrom(output.getClass()))
					ConvolveImageNoBorder.horizontal((Kernel1D_S32)kernel, (InterleavedU8)input, (InterleavedI16)output);
				else
					ConvolveImageNoBorder.horizontal((Kernel1D_S32)kernel, (InterleavedU8)input, (InterleavedS32)output);
			} else if (input instanceof InterleavedS16) {
				ConvolveImageNoBorder.horizontal((Kernel1D_S32)kernel, (InterleavedS16)input, (InterleavedI16)output);
			} else {
				throw new IllegalArgumentException("Unknown image type: " + input.getClass().getName());
			}
		} break;

		case PLANAR: {
			// apply the convolution independently to every band
			Planar inp = (Planar)input;
			Planar outp = (Planar)output;
			for (int i = 0; i < inp.getNumBands(); i++) {
				horizontal(kernel, inp.getBand(i), outp.getBand(i));
			}
		} break;

		default:
			throw new IllegalArgumentException("Unknown image family");
	}
}
Performs a horizontal 1D convolution across the image . The horizontal border is not processed .
629
19
27,152
/**
 * Performs a horizontal 1D convolution across the image while re-normalizing
 * the kernel depending on its overlap with the image, so border pixels are
 * handled without an explicit border rule. Dispatches on the runtime input type.
 *
 * @param kernel 1D kernel matching the image's data type
 * @param input input image (gray, interleaved, or planar)
 * @param output output image of the corresponding type
 * @throws IllegalArgumentException if the image type or family is not supported
 */
public static <In extends ImageBase, Out extends ImageBase, K extends Kernel1D>
void horizontalNormalized( K kernel, In input, Out output ) {
	switch (input.getImageType().getFamily()) {
		case GRAY: {
			if (input instanceof GrayF32) {
				ConvolveImageNormalized.horizontal((Kernel1D_F32)kernel, (GrayF32)input, (GrayF32)output);
			} else if (input instanceof GrayF64) {
				ConvolveImageNormalized.horizontal((Kernel1D_F64)kernel, (GrayF64)input, (GrayF64)output);
			} else if (input instanceof GrayU8) {
				ConvolveImageNormalized.horizontal((Kernel1D_S32)kernel, (GrayU8)input, (GrayI8)output);
			} else if (input instanceof GrayS16) {
				ConvolveImageNormalized.horizontal((Kernel1D_S32)kernel, (GrayS16)input, (GrayI16)output);
			} else {
				throw new IllegalArgumentException("Unknown image type: " + input.getClass().getName());
			}
		} break;

		case INTERLEAVED: {
			if (input instanceof InterleavedF32) {
				ConvolveImageNormalized.horizontal((Kernel1D_F32)kernel, (InterleavedF32)input, (InterleavedF32)output);
			} else if (input instanceof InterleavedF64) {
				ConvolveImageNormalized.horizontal((Kernel1D_F64)kernel, (InterleavedF64)input, (InterleavedF64)output);
			} else if (input instanceof InterleavedU8) {
				ConvolveImageNormalized.horizontal((Kernel1D_S32)kernel, (InterleavedU8)input, (InterleavedI8)output);
			} else if (input instanceof InterleavedS16) {
				ConvolveImageNormalized.horizontal((Kernel1D_S32)kernel, (InterleavedS16)input, (InterleavedI16)output);
			} else {
				throw new IllegalArgumentException("Unknown image type: " + input.getClass().getName());
			}
		} break;

		case PLANAR: {
			// apply the convolution independently to every band
			Planar inp = (Planar)input;
			Planar outp = (Planar)output;
			for (int i = 0; i < inp.getNumBands(); i++) {
				horizontalNormalized(kernel, inp.getBand(i), outp.getBand(i));
			}
		} break;

		default:
			throw new IllegalArgumentException("Unknown image family");
	}
}
Performs a horizontal 1D convolution across the image while re - normalizing the kernel depending on its overlap with the image .
605
26
27,153
/**
 * Performs a 2D convolution across the image while re-normalizing the kernel
 * depending on its overlap with the image, so border pixels are handled without
 * an explicit border rule. Dispatches on the runtime input type.
 *
 * @param kernel 2D kernel matching the image's data type
 * @param input input image (gray, interleaved, or planar)
 * @param output output image of the same type
 * @throws IllegalArgumentException if the image type or family is not supported
 */
public static <T extends ImageBase<T>, K extends Kernel2D>
void convolveNormalized( K kernel, T input, T output ) {
	switch (input.getImageType().getFamily()) {
		case GRAY: {
			if (input instanceof GrayF32) {
				ConvolveImageNormalized.convolve((Kernel2D_F32)kernel, (GrayF32)input, (GrayF32)output);
			} else if (input instanceof GrayF64) {
				ConvolveImageNormalized.convolve((Kernel2D_F64)kernel, (GrayF64)input, (GrayF64)output);
			} else if (input instanceof GrayU8) {
				ConvolveImageNormalized.convolve((Kernel2D_S32)kernel, (GrayU8)input, (GrayI8)output);
			} else if (input instanceof GrayS16) {
				ConvolveImageNormalized.convolve((Kernel2D_S32)kernel, (GrayS16)input, (GrayI16)output);
			} else {
				throw new IllegalArgumentException("Unknown image type: " + input.getClass().getName());
			}
		} break;

		case INTERLEAVED: {
			if (input instanceof InterleavedF32) {
				ConvolveImageNormalized.convolve((Kernel2D_F32)kernel, (InterleavedF32)input, (InterleavedF32)output);
			} else if (input instanceof InterleavedF64) {
				ConvolveImageNormalized.convolve((Kernel2D_F64)kernel, (InterleavedF64)input, (InterleavedF64)output);
			} else if (input instanceof InterleavedU8) {
				ConvolveImageNormalized.convolve((Kernel2D_S32)kernel, (InterleavedU8)input, (InterleavedI8)output);
			} else if (input instanceof InterleavedS16) {
				ConvolveImageNormalized.convolve((Kernel2D_S32)kernel, (InterleavedS16)input, (InterleavedI16)output);
			} else {
				throw new IllegalArgumentException("Unknown image type: " + input.getClass().getName());
			}
		} break;

		case PLANAR: {
			// apply the convolution independently to every band
			Planar inp = (Planar)input;
			Planar outp = (Planar)output;
			for (int i = 0; i < inp.getNumBands(); i++) {
				convolveNormalized(kernel, inp.getBand(i), outp.getBand(i));
			}
		} break;

		default:
			throw new IllegalArgumentException("Unknown image family");
	}
}
Performs a 2D convolution across the image while re - normalizing the kernel depending on its overlap with the image .
613
25
27,154
/**
 * Starts a new segment at the first point (x, y) of the current contour 'e'
 * and queues it on the open list for processing.
 *
 * @param x x-coordinate of the contour's first point
 * @param y y-coordinate of the contour's first point
 */
private void addFirstSegment( int x, int y ) {
	Point2D_I32 p = queuePoints.grow();
	p.set(x, y);
	EdgeSegment s = new EdgeSegment();
	s.points.add(p);
	s.index = 0;
	// -1 marks that the first segment has no parent segment or parent pixel
	s.parent = s.parentPixel = -1;
	e.segments.add(s);
	open.add(s);
}
Starts a new segment at the first point in the contour
94
13
27,155
/**
 * Sets each pixel in the output image to be the absolute value of the input
 * image. Both images can be the same instance. Only signed and floating-point
 * image types are processed; unsigned gray/interleaved types are silently left
 * untouched (abs is a no-op for them).
 *
 * @param input input image (gray, interleaved, or planar)
 * @param output output image of the same type
 */
public static <T extends ImageBase<T>> void abs( T input, T output ) {
	if (input instanceof ImageGray) {
		if (GrayS8.class == input.getClass()) {
			PixelMath.abs((GrayS8)input, (GrayS8)output);
		} else if (GrayS16.class == input.getClass()) {
			PixelMath.abs((GrayS16)input, (GrayS16)output);
		} else if (GrayS32.class == input.getClass()) {
			PixelMath.abs((GrayS32)input, (GrayS32)output);
		} else if (GrayS64.class == input.getClass()) {
			PixelMath.abs((GrayS64)input, (GrayS64)output);
		} else if (GrayF32.class == input.getClass()) {
			PixelMath.abs((GrayF32)input, (GrayF32)output);
		} else if (GrayF64.class == input.getClass()) {
			PixelMath.abs((GrayF64)input, (GrayF64)output);
		}
		// otherwise assume it is an unsigned image type
	} else if (input instanceof ImageInterleaved) {
		if (InterleavedS8.class == input.getClass()) {
			PixelMath.abs((InterleavedS8)input, (InterleavedS8)output);
		} else if (InterleavedS16.class == input.getClass()) {
			PixelMath.abs((InterleavedS16)input, (InterleavedS16)output);
		} else if (InterleavedS32.class == input.getClass()) {
			PixelMath.abs((InterleavedS32)input, (InterleavedS32)output);
		} else if (InterleavedS64.class == input.getClass()) {
			PixelMath.abs((InterleavedS64)input, (InterleavedS64)output);
		} else if (InterleavedF32.class == input.getClass()) {
			PixelMath.abs((InterleavedF32)input, (InterleavedF32)output);
		} else if (InterleavedF64.class == input.getClass()) {
			PixelMath.abs((InterleavedF64)input, (InterleavedF64)output);
		}
	} else {
		// planar: recurse on each band
		Planar in = (Planar)input;
		Planar out = (Planar)output;
		for (int i = 0; i < in.getNumBands(); i++) {
			abs(in.getBand(i), out.getBand(i));
		}
	}
}
Sets each pixel in the output image to be the absolute value of the input image . Both the input and output image can be the same instance .
609
30
27,156
/**
 * Bounds image pixels to be between the two values, in place. The bounds are
 * narrowed to the image's native primitive type (int/long/float) before use.
 *
 * @param input image modified in place (gray or planar)
 * @param min lower bound, inclusive
 * @param max upper bound, inclusive
 * @throws IllegalArgumentException if the gray image type is not recognized
 */
public static <T extends ImageBase<T>> void boundImage( T input, double min, double max ) {
	if (input instanceof ImageGray) {
		if (GrayU8.class == input.getClass()) {
			PixelMath.boundImage((GrayU8)input, (int)min, (int)max);
		} else if (GrayS8.class == input.getClass()) {
			PixelMath.boundImage((GrayS8)input, (int)min, (int)max);
		} else if (GrayU16.class == input.getClass()) {
			PixelMath.boundImage((GrayU16)input, (int)min, (int)max);
		} else if (GrayS16.class == input.getClass()) {
			PixelMath.boundImage((GrayS16)input, (int)min, (int)max);
		} else if (GrayS32.class == input.getClass()) {
			PixelMath.boundImage((GrayS32)input, (int)min, (int)max);
		} else if (GrayS64.class == input.getClass()) {
			PixelMath.boundImage((GrayS64)input, (long)min, (long)max);
		} else if (GrayF32.class == input.getClass()) {
			PixelMath.boundImage((GrayF32)input, (float)min, (float)max);
		} else if (GrayF64.class == input.getClass()) {
			PixelMath.boundImage((GrayF64)input, min, max);
		} else {
			throw new IllegalArgumentException("Unknown image Type: " + input.getClass().getSimpleName());
		}
	} else if (input instanceof Planar) {
		// planar: recurse on each band
		Planar in = (Planar)input;
		for (int i = 0; i < in.getNumBands(); i++) {
			boundImage(in.getBand(i), min, max);
		}
	}
}
Bounds image pixels to be between these two values .
456
11
27,157
/**
 * Extracts the descriptor at the current target location and updates the
 * descriptor panel; clears the panel when no target is selected.
 * Should only be called from the Swing event-dispatch thread.
 */
private void updateTargetDescription() {
	if (targetPt != null) {
		TupleDesc feature = describe.createDescription();
		describe.process(targetPt.x, targetPt.y, targetOrientation, targetRadius, feature);
		tuplePanel.setDescription(feature);
	} else {
		// no target selected -> show an empty panel
		tuplePanel.setDescription(null);
	}
	tuplePanel.repaint();
}
Extracts the target description and updates the panel . Should only be called from a swing thread
88
19
27,158
/**
 * Computes the homography induced from view 1 to 3 by a line in view 2.
 * The provided line in view 2 must contain the view 2 observation.
 * Column i of the output is transpose(Ti)*line2.
 *
 * @param tensor trifocal tensor for the three views
 * @param line2 line in view 2 (homogeneous coordinates)
 * @param output optional storage for the 3x3 homography; allocated when null
 * @return homography from view 1 to view 3
 */
public static DMatrixRMaj inducedHomography13( TrifocalTensor tensor, Vector3D_F64 line2, DMatrixRMaj output ) {
	if (output == null)
		output = new DMatrixRMaj(3, 3);

	DMatrixRMaj T = tensor.T1;

	// H(:,0) = transpose(T1)*line
	output.data[0] = T.data[0]*line2.x + T.data[3]*line2.y + T.data[6]*line2.z;
	output.data[3] = T.data[1]*line2.x + T.data[4]*line2.y + T.data[7]*line2.z;
	output.data[6] = T.data[2]*line2.x + T.data[5]*line2.y + T.data[8]*line2.z;

	// H(:,1) = transpose(T2)*line
	T = tensor.T2;
	output.data[1] = T.data[0]*line2.x + T.data[3]*line2.y + T.data[6]*line2.z;
	output.data[4] = T.data[1]*line2.x + T.data[4]*line2.y + T.data[7]*line2.z;
	output.data[7] = T.data[2]*line2.x + T.data[5]*line2.y + T.data[8]*line2.z;

	// H(:,2) = transpose(T3)*line
	T = tensor.T3;
	output.data[2] = T.data[0]*line2.x + T.data[3]*line2.y + T.data[6]*line2.z;
	output.data[5] = T.data[1]*line2.x + T.data[4]*line2.y + T.data[7]*line2.z;
	output.data[8] = T.data[2]*line2.x + T.data[5]*line2.y + T.data[8]*line2.z;

	return output;
}
Computes the homography induced from view 1 to 3 by a line in view 2 . The provided line in view 2 must contain the view 2 observation .
627
31
27,159
/**
 * Computes the homography induced from view 1 to 2 by a line in view 3.
 * The provided line in view 3 must contain the view 3 observation.
 * Column i of the output is Ti*line3. (The original comments labeled every
 * column "H(:,0)"; corrected below.)
 *
 * @param tensor trifocal tensor for the three views
 * @param line3 line in view 3 (homogeneous coordinates)
 * @param output optional storage for the 3x3 homography; allocated when null
 * @return homography from view 1 to view 2
 */
public static DMatrixRMaj inducedHomography12( TrifocalTensor tensor, Vector3D_F64 line3, DMatrixRMaj output ) {
	if (output == null)
		output = new DMatrixRMaj(3, 3);

	// H(:,0) = T1*line
	DMatrixRMaj T = tensor.T1;
	output.data[0] = T.data[0]*line3.x + T.data[1]*line3.y + T.data[2]*line3.z;
	output.data[3] = T.data[3]*line3.x + T.data[4]*line3.y + T.data[5]*line3.z;
	output.data[6] = T.data[6]*line3.x + T.data[7]*line3.y + T.data[8]*line3.z;

	// H(:,1) = T2*line
	T = tensor.T2;
	output.data[1] = T.data[0]*line3.x + T.data[1]*line3.y + T.data[2]*line3.z;
	output.data[4] = T.data[3]*line3.x + T.data[4]*line3.y + T.data[5]*line3.z;
	output.data[7] = T.data[6]*line3.x + T.data[7]*line3.y + T.data[8]*line3.z;

	// H(:,2) = T3*line
	T = tensor.T3;
	output.data[2] = T.data[0]*line3.x + T.data[1]*line3.y + T.data[2]*line3.z;
	output.data[5] = T.data[3]*line3.x + T.data[4]*line3.y + T.data[5]*line3.z;
	output.data[8] = T.data[6]*line3.x + T.data[7]*line3.y + T.data[8]*line3.z;

	return output;
}
Computes the homography induced from view 1 to 2 by a line in view 3 . The provided line in view 3 must contain the view 3 observation .
616
31
27,160
/**
 * Computes the homography induced by a planar surface seen in two views from
 * three point correspondences. All observations must lie on the plane.
 *
 * @param F fundamental matrix between the two views
 * @param p1 first point correspondence on the plane
 * @param p2 second point correspondence on the plane
 * @param p3 third point correspondence on the plane
 * @return the induced homography, or null if estimation fails
 */
public static DMatrixRMaj homographyStereo3Pts( DMatrixRMaj F, AssociatedPair p1, AssociatedPair p2, AssociatedPair p3 ) {
	HomographyInducedStereo3Pts estimator = new HomographyInducedStereo3Pts();
	estimator.setFundamental(F, null);
	return estimator.process(p1, p2, p3) ? estimator.getHomography() : null;
}
Computes the homography induced from a planar surface when viewed from two views using correspondences of three points . Observations must be on the planar surface .
110
33
27,161
/**
 * Computes the homography induced by a planar surface seen in two views from a
 * line and a point correspondence. Both observations must lie on the plane.
 *
 * @param F fundamental matrix between the two views
 * @param line line correspondence on the plane
 * @param point point correspondence on the plane
 * @return the induced homography
 */
public static DMatrixRMaj homographyStereoLinePt( DMatrixRMaj F, PairLineNorm line, AssociatedPair point ) {
	HomographyInducedStereoLinePt estimator = new HomographyInducedStereoLinePt();
	estimator.setFundamental(F, null);
	// NOTE(review): unlike the 3-point and 2-line variants, no failure status is
	// checked here -- confirm process() cannot fail for this estimator
	estimator.process(line, point);
	return estimator.getHomography();
}
Computes the homography induced from a planar surface when viewed from two views using correspondences of a line and a point . Observations must be on the planar surface .
91
36
27,162
/**
 * Computes the homography induced by a planar surface seen in two views from
 * two line correspondences. Both observations must lie on the plane.
 *
 * @param F fundamental matrix between the two views
 * @param line0 first line correspondence on the plane
 * @param line1 second line correspondence on the plane
 * @return the induced homography, or null if estimation fails
 */
public static DMatrixRMaj homographyStereo2Lines( DMatrixRMaj F, PairLineNorm line0, PairLineNorm line1 ) {
	HomographyInducedStereo2Line estimator = new HomographyInducedStereo2Line();
	estimator.setFundamental(F, null);
	return estimator.process(line0, line1) ? estimator.getHomography() : null;
}
Computes the homography induced from a planar surface when viewed from two views using correspondences of two lines . Observations must be on the planar surface .
99
33
27,163
/**
 * Computes a fundamental matrix from an essential matrix and the camera's
 * intrinsic parameters, by building the calibration matrix K and delegating
 * to the K-based overload.
 *
 * @param E essential matrix
 * @param intrinsic pinhole camera intrinsics
 * @return fundamental matrix
 */
public static DMatrixRMaj createFundamental( DMatrixRMaj E, CameraPinhole intrinsic ) {
	DMatrixRMaj calibration = PerspectiveOps.pinholeToMatrix(intrinsic, (DMatrixRMaj)null);
	return createFundamental(E, calibration);
}
Computes a Fundamental matrix given an Essential matrix and the camera s intrinsic parameters .
62
16
27,164
/**
 * Elevates a projective camera matrix into a metric one using the rectifying
 * homography, then extracts the calibration matrix and the camera pose.
 *
 * @param cameraMatrix projective 3x4 camera matrix
 * @param H rectifying homography
 * @param worldToView (output) extracted metric camera pose
 * @param K (output) extracted 3x3 calibration matrix
 */
public static void projectiveToMetric( DMatrixRMaj cameraMatrix, DMatrixRMaj H, Se3_F64 worldToView, DMatrixRMaj K ) {
	// elevate: P_metric = P_projective * H
	DMatrixRMaj elevated = new DMatrixRMaj(3, 4);
	CommonOps_DDRM.mult(cameraMatrix, H, elevated);
	// split the metric camera matrix into K and [R|t]
	MultiViewOps.decomposeMetricCamera(elevated, K, worldToView);
}
Elevates a projective camera matrix into a metric one using the rectifying homography . Extracts calibration and Se3 pose .
97
27
27,165
// Converts a projective camera matrix into a metric transform given the rectifying homography H
// and a KNOWN calibration matrix K. Steps: (1) apply H to the camera matrix, (2) strip K by
// left-multiplying with K^-1, (3) read R from the left 3x3 block and T from the last column,
// (4) project R onto the closest rotation matrix via SVD (R <- U*V^T), and (5) if det(R) < 0,
// negate both R and T so the rotation is proper (+1 determinant).
// Throws RuntimeException if the SVD fails to decompose R.
// NOTE(review): the recovered (R,T) are not renormalized for scale here — presumably the caller
// handles any scale ambiguity; confirm against the call sites.
public static void projectiveToMetricKnownK ( DMatrixRMaj cameraMatrix , DMatrixRMaj H , DMatrixRMaj K , Se3_F64 worldToView ) { DMatrixRMaj tmp = new DMatrixRMaj ( 3 , 4 ) ; CommonOps_DDRM . mult ( cameraMatrix , H , tmp ) ; DMatrixRMaj K_inv = new DMatrixRMaj ( 3 , 3 ) ; CommonOps_DDRM . invert ( K , K_inv ) ; DMatrixRMaj P = new DMatrixRMaj ( 3 , 4 ) ; CommonOps_DDRM . mult ( K_inv , tmp , P ) ; CommonOps_DDRM . extract ( P , 0 , 0 , worldToView . R ) ; worldToView . T . x = P . get ( 0 , 3 ) ; worldToView . T . y = P . get ( 1 , 3 ) ; worldToView . T . z = P . get ( 2 , 3 ) ; SingularValueDecomposition_F64 < DMatrixRMaj > svd = DecompositionFactory_DDRM . svd ( true , true , true ) ; DMatrixRMaj R = worldToView . R ; if ( ! svd . decompose ( R ) ) throw new RuntimeException ( "SVD Failed" ) ; CommonOps_DDRM . multTransB ( svd . getU ( null , false ) , svd . getV ( null , false ) , R ) ; // determinant should be +1 double det = CommonOps_DDRM . det ( R ) ; if ( det < 0 ) { CommonOps_DDRM . scale ( - 1 , R ) ; worldToView . T . scale ( - 1 ) ; } }
Convert the projective camera matrix into a metric transform given the rectifying homography and a known calibration matrix .
396
23
27,166
public static void rectifyHToAbsoluteQuadratic ( DMatrixRMaj H , DMatrixRMaj Q ) { int indexQ = 0 ; for ( int rowA = 0 ; rowA < 4 ; rowA ++ ) { for ( int colB = 0 ; colB < 4 ; colB ++ ) { int indexA = rowA * 4 ; int indexB = colB * 4 ; double sum = 0 ; for ( int i = 0 ; i < 3 ; i ++ ) { // sum += H.get(rowA,i)*H.get(colB,i); sum += H . data [ indexA ++ ] * H . data [ indexB ++ ] ; } // Q.set(rowA,colB,sum); Q . data [ indexQ ++ ] = sum ; } } }
Rectifying homography to dual absolute quadratic .
177
11
27,167
public static void intrinsicFromAbsoluteQuadratic ( DMatrixRMaj Q , DMatrixRMaj P , CameraPinhole intrinsic ) { DMatrixRMaj tmp = new DMatrixRMaj ( 3 , 4 ) ; DMatrixRMaj tmp2 = new DMatrixRMaj ( 3 , 3 ) ; CommonOps_DDRM . mult ( P , Q , tmp ) ; CommonOps_DDRM . multTransB ( tmp , P , tmp2 ) ; decomposeDiac ( tmp2 , intrinsic ) ; }
Extracts the intrinsic camera matrix from a view given its camera matrix and the dual absolute quadratic .
117
22
27,168
/**
 * Splits the associated pairs into two lists: one containing every p1 and one
 * containing every p2. The point objects themselves are shared, not copied.
 *
 * @param input list of associated pairs
 * @return tuple of (all p1 points, all p2 points)
 */
public static Tuple2<List<Point2D_F64>, List<Point2D_F64>> split2( List<AssociatedPair> input ) {
	List<Point2D_F64> first = new ArrayList<>(input.size());
	List<Point2D_F64> second = new ArrayList<>(input.size());
	for (AssociatedPair pair : input) {
		first.add(pair.p1);
		second.add(pair.p2);
	}
	return new Tuple2<>(first, second);
}
Splits the associated pairs into two lists
146
8
27,169
/**
 * Splits the associated triples into three lists: one per observation. The point
 * objects themselves are shared, not copied.
 *
 * @param input list of associated triples
 * @return tuple of (all p1 points, all p2 points, all p3 points)
 */
public static Tuple3<List<Point2D_F64>, List<Point2D_F64>, List<Point2D_F64>> split3( List<AssociatedTriple> input ) {
	List<Point2D_F64> first = new ArrayList<>(input.size());
	List<Point2D_F64> second = new ArrayList<>(input.size());
	List<Point2D_F64> third = new ArrayList<>(input.size());
	for (AssociatedTriple triple : input) {
		first.add(triple.p1);
		second.add(triple.p2);
		third.add(triple.p3);
	}
	return new Tuple3<>(first, second, third);
}
Splits the associated triple into three lists
195
8
27,170
protected void performShrinkage ( I transform , int numLevels ) { // step through each layer in the pyramid. for ( int i = 0 ; i < numLevels ; i ++ ) { int w = transform . width ; int h = transform . height ; int ww = w / 2 ; int hh = h / 2 ; Number threshold ; I subband ; // HL subband = transform . subimage ( ww , 0 , w , hh , null ) ; threshold = computeThreshold ( subband ) ; rule . process ( subband , threshold ) ; // System.out.print("HL = "+threshold); // LH subband = transform . subimage ( 0 , hh , ww , h , null ) ; threshold = computeThreshold ( subband ) ; rule . process ( subband , threshold ) ; // System.out.print(" LH = "+threshold); // HH subband = transform . subimage ( ww , hh , w , h , null ) ; threshold = computeThreshold ( subband ) ; rule . process ( subband , threshold ) ; // System.out.println(" HH = "+threshold); transform = transform . subimage ( 0 , 0 , ww , hh , null ) ; } }
Performs wavelet shrinking using the specified rule and by computing a threshold for each subband .
270
19
27,171
@ Override public void denoise ( GrayF32 transform , int numLevels ) { int scale = UtilWavelet . computeScale ( numLevels ) ; final int h = transform . height ; final int w = transform . width ; // width and height of scaling image final int innerWidth = w / scale ; final int innerHeight = h / scale ; GrayF32 subbandHH = transform . subimage ( w / 2 , h / 2 , w , h , null ) ; float sigma = UtilDenoiseWavelet . estimateNoiseStdDev ( subbandHH , null ) ; float threshold = ( float ) UtilDenoiseWavelet . universalThreshold ( subbandHH , sigma ) ; // apply same threshold to all wavelet coefficients rule . process ( transform . subimage ( innerWidth , 0 , w , h , null ) , threshold ) ; rule . process ( transform . subimage ( 0 , innerHeight , innerWidth , h , null ) , threshold ) ; }
Applies VisuShrink denoising to the provided multilevel wavelet transform using the provided threshold .
212
24
27,172
public static WaveletDescription < WlCoef_F32 > generate_F32 ( int I ) { if ( I != 6 ) { throw new IllegalArgumentException ( "Only 6 is currently supported" ) ; } WlCoef_F32 coef = new WlCoef_F32 ( ) ; coef . offsetScaling = - 2 ; coef . offsetWavelet = - 2 ; coef . scaling = new float [ 6 ] ; coef . wavelet = new float [ 6 ] ; double sqrt7 = Math . sqrt ( 7 ) ; double div = 16.0 * Math . sqrt ( 2 ) ; coef . scaling [ 0 ] = ( float ) ( ( 1.0 - sqrt7 ) / div ) ; coef . scaling [ 1 ] = ( float ) ( ( 5.0 + sqrt7 ) / div ) ; coef . scaling [ 2 ] = ( float ) ( ( 14.0 + 2.0 * sqrt7 ) / div ) ; coef . scaling [ 3 ] = ( float ) ( ( 14.0 - 2.0 * sqrt7 ) / div ) ; coef . scaling [ 4 ] = ( float ) ( ( 1.0 - sqrt7 ) / div ) ; coef . scaling [ 5 ] = ( float ) ( ( - 3.0 + sqrt7 ) / div ) ; coef . wavelet [ 0 ] = coef . scaling [ 5 ] ; coef . wavelet [ 1 ] = - coef . scaling [ 4 ] ; coef . wavelet [ 2 ] = coef . scaling [ 3 ] ; coef . wavelet [ 3 ] = - coef . scaling [ 2 ] ; coef . wavelet [ 4 ] = coef . scaling [ 1 ] ; coef . wavelet [ 5 ] = - coef . scaling [ 0 ] ; WlBorderCoefStandard < WlCoef_F32 > inverse = new WlBorderCoefStandard <> ( coef ) ; return new WaveletDescription <> ( new BorderIndex1D_Wrap ( ) , coef , inverse ) ; }
Creates a description of a Coiflet of order I wavelet .
460
15
27,173
public static FitData < EllipseRotated_F64 > fitEllipse_F64 ( List < Point2D_F64 > points , int iterations , boolean computeError , FitData < EllipseRotated_F64 > outputStorage ) { if ( outputStorage == null ) { outputStorage = new FitData <> ( new EllipseRotated_F64 ( ) ) ; } // Compute the optimal algebraic error FitEllipseAlgebraic_F64 algebraic = new FitEllipseAlgebraic_F64 ( ) ; if ( ! algebraic . process ( points ) ) { // could be a line or some other weird case. Create a crude estimate instead FitData < Circle2D_F64 > circleData = averageCircle_F64 ( points , null , null ) ; Circle2D_F64 circle = circleData . shape ; outputStorage . shape . set ( circle . center . x , circle . center . y , circle . radius , circle . radius , 0 ) ; } else { UtilEllipse_F64 . convert ( algebraic . getEllipse ( ) , outputStorage . shape ) ; } // Improve the solution from algebraic into Euclidean if ( iterations > 0 ) { RefineEllipseEuclideanLeastSquares_F64 leastSquares = new RefineEllipseEuclideanLeastSquares_F64 ( ) ; leastSquares . setMaxIterations ( iterations ) ; leastSquares . refine ( outputStorage . shape , points ) ; outputStorage . shape . set ( leastSquares . getFound ( ) ) ; } // compute the average Euclidean error if the user requests it if ( computeError ) { ClosestPointEllipseAngle_F64 closestPoint = new ClosestPointEllipseAngle_F64 ( 1e-8 , 100 ) ; closestPoint . setEllipse ( outputStorage . shape ) ; double total = 0 ; for ( Point2D_F64 p : points ) { closestPoint . process ( p ) ; total += p . distance ( closestPoint . getClosest ( ) ) ; } outputStorage . error = total / points . size ( ) ; } else { outputStorage . error = 0 ; } return outputStorage ; }
Computes the best fit ellipse based on minimizing Euclidean distance . An estimate is initially provided using algebraic algorithm which is then refined using non - linear optimization . The amount of non - linear optimization can be controlled using iterations parameter . Will work with partial and complete contours of objects .
496
60
27,174
// Converts a list of integer (I32) points into floating-point (F64) points.
// Thin wrapper: delegates to the queue-returning overload and exposes the result as a List.
public static List < Point2D_F64 > convert_I32_F64 ( List < Point2D_I32 > points ) { return convert_I32_F64 ( points , null ) . toList ( ) ; }
Converts a list of I32 points into F64
51
11
27,175
public static FitData < Circle2D_F64 > averageCircle_I32 ( List < Point2D_I32 > points , GrowQueue_F64 optional , FitData < Circle2D_F64 > outputStorage ) { if ( outputStorage == null ) { outputStorage = new FitData <> ( new Circle2D_F64 ( ) ) ; } if ( optional == null ) { optional = new GrowQueue_F64 ( ) ; } Circle2D_F64 circle = outputStorage . shape ; int N = points . size ( ) ; // find center of the circle by computing the mean x and y int sumX = 0 , sumY = 0 ; for ( int i = 0 ; i < N ; i ++ ) { Point2D_I32 p = points . get ( i ) ; sumX += p . x ; sumY += p . y ; } optional . reset ( ) ; double centerX = circle . center . x = sumX / ( double ) N ; double centerY = circle . center . y = sumY / ( double ) N ; double meanR = 0 ; for ( int i = 0 ; i < N ; i ++ ) { Point2D_I32 p = points . get ( i ) ; double dx = p . x - centerX ; double dy = p . y - centerY ; double r = Math . sqrt ( dx * dx + dy * dy ) ; optional . push ( r ) ; meanR += r ; } meanR /= N ; circle . radius = meanR ; // compute radius variance double variance = 0 ; for ( int i = 0 ; i < N ; i ++ ) { double diff = optional . get ( i ) - meanR ; variance += diff * diff ; } outputStorage . error = variance / N ; return outputStorage ; }
Computes a circle which has it s center at the mean position of the provided points and radius is equal to the average distance of each point from the center . While fast to compute the provided circle is not a best fit circle by any reasonable metric except for special cases .
389
54
27,176
public void fixate ( ) { ransac = FactoryMultiViewRobust . trifocalRansac ( configTriRansac , configError , configRansac ) ; sba = FactoryMultiView . bundleSparseProjective ( configSBA ) ; }
Must call if you change configurations .
58
7
27,177
/**
 * Exhaustively examines every pair of views connected to the seed and selects the
 * triplet (seed, viewB, viewC) with the highest score.
 *
 * @param seed (Input) view the triplet is built around
 * @param motions (Input) indexes of the seed's connections under consideration
 * @param selected (Output) the two chosen indexes, written to selected[0] and selected[1]
 * @return true if a triplet with a non-zero score was found
 */
boolean selectInitialTriplet( View seed, GrowQueue_I32 motions, int[] selected ) {
	double bestScore = 0;
	for (int i = 0; i < motions.size; i++) {
		// NOTE(review): connections are indexed by the loop counter directly here, while
		// findRemainingCameraMatrices() indexes them via motions.get(idx). If 'motions' is not
		// simply 0..connections.size()-1 this looks like it should be
		// seed.connections.get(motions.get(i)) — confirm against the caller.
		View viewB = seed.connections.get(i).other(seed);

		for (int j = i + 1; j < motions.size; j++) {
			View viewC = seed.connections.get(j).other(seed);

			double score = scoreTripleView(seed, viewB, viewC);
			if (score > bestScore) {
				bestScore = score;
				selected[0] = i;
				selected[1] = j;
			}
		}
	}
	// a best score of zero means no usable triplet was found
	return bestScore != 0;
}
Exhaustively look at all triplets that connect with the seed view
155
14
27,178
private void triangulateFeatures ( List < AssociatedTriple > inliers , DMatrixRMaj P1 , DMatrixRMaj P2 , DMatrixRMaj P3 ) { List < DMatrixRMaj > cameraMatrices = new ArrayList <> ( ) ; cameraMatrices . add ( P1 ) ; cameraMatrices . add ( P2 ) ; cameraMatrices . add ( P3 ) ; // need elements to be non-empty so that it can use set(). probably over optimization List < Point2D_F64 > triangObs = new ArrayList <> ( ) ; triangObs . add ( null ) ; triangObs . add ( null ) ; triangObs . add ( null ) ; Point4D_F64 X = new Point4D_F64 ( ) ; for ( int i = 0 ; i < inliers . size ( ) ; i ++ ) { AssociatedTriple t = inliers . get ( i ) ; triangObs . set ( 0 , t . p1 ) ; triangObs . set ( 1 , t . p2 ) ; triangObs . set ( 2 , t . p3 ) ; // triangulation can fail if all 3 views have the same pixel value. This has been observed in // simulated 3D scenes if ( triangulator . triangulate ( triangObs , cameraMatrices , X ) ) { structure . points [ i ] . set ( X . x , X . y , X . z , X . w ) ; } else { throw new RuntimeException ( "Failed to triangulate a point in the inlier set?! Handle if this is common" ) ; } } }
Triangulates the location of each feature in homogeneous space
361
12
27,179
private void initializeProjective3 ( FastQueue < AssociatedTriple > associated , FastQueue < AssociatedTripleIndex > associatedIdx , int totalViews , View viewA , View viewB , View viewC , int idxViewB , int idxViewC ) { ransac . process ( associated . toList ( ) ) ; List < AssociatedTriple > inliers = ransac . getMatchSet ( ) ; TrifocalTensor model = ransac . getModelParameters ( ) ; if ( verbose != null ) verbose . println ( "Remaining after RANSAC " + inliers . size ( ) + " / " + associated . size ( ) ) ; // projective camera matrices for each view DMatrixRMaj P1 = CommonOps_DDRM . identity ( 3 , 4 ) ; DMatrixRMaj P2 = new DMatrixRMaj ( 3 , 4 ) ; DMatrixRMaj P3 = new DMatrixRMaj ( 3 , 4 ) ; MultiViewOps . extractCameraMatrices ( model , P2 , P3 ) ; // Initialize the 3D scene structure, stored in a format understood by bundle adjustment structure . initialize ( totalViews , inliers . size ( ) ) ; // specify the found projective camera matrices db . lookupShape ( viewA . id , shape ) ; // The first view is assumed to be the coordinate system's origin and is identity by definition structure . setView ( 0 , true , P1 , shape . width , shape . height ) ; db . lookupShape ( viewB . id , shape ) ; structure . setView ( idxViewB , false , P2 , shape . width , shape . height ) ; db . lookupShape ( viewC . id , shape ) ; structure . setView ( idxViewC , false , P3 , shape . width , shape . height ) ; // triangulate homogenous coordinates for each point in the inlier set triangulateFeatures ( inliers , P1 , P2 , P3 ) ; // Update the list of common features by pruning features not in the inlier set seedToStructure . resize ( viewA . totalFeatures ) ; seedToStructure . fill ( - 1 ) ; // -1 indicates no match inlierToSeed . resize ( inliers . size ( ) ) ; for ( int i = 0 ; i < inliers . size ( ) ; i ++ ) { int inputIdx = ransac . getInputIndex ( i ) ; // table to go from inlier list into seed feature index inlierToSeed . data [ i ] = matchesTripleIdx . get ( inputIdx ) . 
a ; // seed feature index into the ouptut structure index seedToStructure . data [ inlierToSeed . data [ i ] ] = i ; } }
Initializes projective reconstruction from 3 - views .
615
10
27,180
boolean findRemainingCameraMatrices ( LookupSimilarImages db , View seed , GrowQueue_I32 motions ) { points3D . reset ( ) ; // points in 3D for ( int i = 0 ; i < structure . points . length ; i ++ ) { structure . points [ i ] . get ( points3D . grow ( ) ) ; } // contains associated pairs of pixel observations // save a call to db by using the previously loaded points assocPixel . reset ( ) ; for ( int i = 0 ; i < inlierToSeed . size ; i ++ ) { // inliers from triple RANSAC // each of these inliers was declared a feature in the world reference frame assocPixel . grow ( ) . p1 . set ( matchesTriple . get ( i ) . p1 ) ; } DMatrixRMaj cameraMatrix = new DMatrixRMaj ( 3 , 4 ) ; for ( int motionIdx = 0 ; motionIdx < motions . size ; motionIdx ++ ) { // skip views already in the scene's structure if ( motionIdx == selectedTriple [ 0 ] || motionIdx == selectedTriple [ 1 ] ) continue ; int connectionIdx = motions . get ( motionIdx ) ; Motion edge = seed . connections . get ( connectionIdx ) ; View viewI = edge . other ( seed ) ; // Lookup pixel locations of features in the connected view db . lookupPixelFeats ( viewI . id , featsB ) ; if ( ! computeCameraMatrix ( seed , edge , featsB , cameraMatrix ) ) { if ( verbose != null ) { verbose . println ( "Pose estimator failed! motionIdx=" + motionIdx ) ; } return false ; } db . lookupShape ( edge . other ( seed ) . id , shape ) ; structure . setView ( motionIdx , false , cameraMatrix , shape . width , shape . height ) ; } return true ; }
Uses the triangulated points and the observations in the root view to estimate the camera matrix for all the remaining views
420
25
27,181
private boolean computeCameraMatrix ( View seed , Motion edge , FastQueue < Point2D_F64 > featsB , DMatrixRMaj cameraMatrix ) { boolean seedSrc = edge . src == seed ; int matched = 0 ; for ( int i = 0 ; i < edge . inliers . size ; i ++ ) { // need to go from i to index of detected features in view 'seed' to index index of feature in // the reconstruction AssociatedIndex a = edge . inliers . get ( i ) ; int featId = seedToStructure . data [ seedSrc ? a . src : a . dst ] ; if ( featId == - 1 ) continue ; assocPixel . get ( featId ) . p2 . set ( featsB . get ( seedSrc ? a . dst : a . src ) ) ; matched ++ ; } // All views should have matches for all features, simple sanity check if ( matched != assocPixel . size ) throw new RuntimeException ( "BUG! Didn't find all features in the view" ) ; // Estimate the camera matrix given homogenous pixel observations if ( poseEstimator . processHomogenous ( assocPixel . toList ( ) , points3D . toList ( ) ) ) { cameraMatrix . set ( poseEstimator . getProjective ( ) ) ; return true ; } else { return false ; } }
Computes camera matrix between the seed view and a connected view
295
12
27,182
private SceneObservations createObservationsForBundleAdjustment ( LookupSimilarImages db , View seed , GrowQueue_I32 motions ) { // seed view + the motions SceneObservations observations = new SceneObservations ( motions . size + 1 ) ; // Observations for the seed view are a special case SceneObservations . View obsView = observations . getView ( 0 ) ; for ( int i = 0 ; i < inlierToSeed . size ; i ++ ) { int id = inlierToSeed . data [ i ] ; Point2D_F64 o = featsA . get ( id ) ; // featsA is never modified after initially loaded id = seedToStructure . data [ id ] ; obsView . add ( id , ( float ) o . x , ( float ) o . y ) ; } // Now add observations for edges connected to the seed for ( int i = 0 ; i < motions . size ( ) ; i ++ ) { obsView = observations . getView ( i + 1 ) ; Motion m = seed . connections . get ( motions . get ( i ) ) ; View v = m . other ( seed ) ; boolean seedIsSrc = m . src == seed ; db . lookupPixelFeats ( v . id , featsB ) ; for ( int j = 0 ; j < m . inliers . size ; j ++ ) { AssociatedIndex a = m . inliers . get ( j ) ; int id = seedToStructure . data [ seedIsSrc ? a . src : a . dst ] ; if ( id < 0 ) continue ; Point2D_F64 o = featsB . get ( seedIsSrc ? a . dst : a . src ) ; obsView . add ( id , ( float ) o . x , ( float ) o . y ) ; } } return observations ; }
Convert observations into a format which bundle adjustment will understand
399
11
27,183
private boolean refineWithBundleAdjustment ( SceneObservations observations ) { if ( scaleSBA ) { scaler . applyScale ( structure , observations ) ; } sba . setVerbose ( verbose , verboseLevel ) ; sba . setParameters ( structure , observations ) ; sba . configure ( converge . ftol , converge . gtol , converge . maxIterations ) ; if ( ! sba . optimize ( structure ) ) { return false ; } if ( scaleSBA ) { // only undo scaling on camera matrices since observations are discarded for ( int i = 0 ; i < structure . views . length ; i ++ ) { DMatrixRMaj P = structure . views [ i ] . worldToView ; scaler . pixelScaling . get ( i ) . remove ( P , P ) ; } scaler . undoScale ( structure , observations ) ; } return true ; }
Last step is to refine the current initial estimate with bundle adjustment
193
12
27,184
public static void nv21ToBoof ( byte [ ] data , int width , int height , ImageBase output ) { if ( output instanceof Planar ) { Planar ms = ( Planar ) output ; if ( ms . getBandType ( ) == GrayU8 . class ) { ConvertNV21 . nv21TPlanarRgb_U8 ( data , width , height , ms ) ; } else if ( ms . getBandType ( ) == GrayF32 . class ) { ConvertNV21 . nv21ToPlanarRgb_F32 ( data , width , height , ms ) ; } else { throw new IllegalArgumentException ( "Unsupported output band format" ) ; } } else if ( output instanceof ImageGray ) { if ( output . getClass ( ) == GrayU8 . class ) { nv21ToGray ( data , width , height , ( GrayU8 ) output ) ; } else if ( output . getClass ( ) == GrayF32 . class ) { nv21ToGray ( data , width , height , ( GrayF32 ) output ) ; } else { throw new IllegalArgumentException ( "Unsupported output type" ) ; } } else if ( output instanceof ImageInterleaved ) { if ( output . getClass ( ) == InterleavedU8 . class ) { ConvertNV21 . nv21ToInterleaved ( data , width , height , ( InterleavedU8 ) output ) ; } else if ( output . getClass ( ) == InterleavedF32 . class ) { ConvertNV21 . nv21ToInterleaved ( data , width , height , ( InterleavedF32 ) output ) ; } else { throw new IllegalArgumentException ( "Unsupported output type" ) ; } } else { throw new IllegalArgumentException ( "Boofcv image type not yet supported" ) ; } }
Converts a NV21 encoded byte array into a BoofCV formatted image .
405
16
27,185
public static < T extends ImageGray < T > > T nv21ToGray ( byte [ ] data , int width , int height , T output , Class < T > outputType ) { if ( outputType == GrayU8 . class ) { return ( T ) nv21ToGray ( data , width , height , ( GrayU8 ) output ) ; } else if ( outputType == GrayF32 . class ) { return ( T ) nv21ToGray ( data , width , height , ( GrayF32 ) output ) ; } else { throw new IllegalArgumentException ( "Unsupported BoofCV Image Type " + outputType . getSimpleName ( ) ) ; } }
Converts an NV21 image into a gray scale image . Image type is determined at runtime .
146
19
27,186
/**
 * Converts an NV21 image into a gray scale U8 image.
 *
 * @param data NV21 encoded byte array
 * @param width image width
 * @param height image height
 * @param output (Optional) storage for the output. Reshaped to fit. Can be null.
 * @return the gray scale image
 */
public static GrayU8 nv21ToGray( byte[] data, int width, int height, GrayU8 output ) {
	// lazily allocate the output image, otherwise resize it to the requested dimensions
	if (output == null) {
		output = new GrayU8(width, height);
	} else {
		output.reshape(width, height);
	}

	// dispatch to the concurrent implementation when enabled globally
	if (BoofConcurrency.USE_CONCURRENT) {
		ImplConvertNV21_MT.nv21ToGray(data, output);
	} else {
		ImplConvertNV21.nv21ToGray(data, output);
	}

	return output;
}
Converts an NV21 image into a gray scale U8 image .
119
14
27,187
public void generate ( long value , int gridWidth ) { renderer . init ( ) ; drawBorder ( ) ; double whiteBorder = whiteBorderDoc / markerWidth ; double X0 = whiteBorder + blackBorder ; double Y0 = whiteBorder + blackBorder ; double bw = ( 1.0 - 2 * X0 ) / gridWidth ; // Draw the black corner used to ID the orientation square ( X0 , 1.0 - whiteBorder - blackBorder - bw , bw ) ; final int bitCount = gridWidth * gridWidth - 4 ; for ( int j = 0 ; j < bitCount ; j ++ ) { if ( ( value & ( 1L << j ) ) != 0 ) { box ( bw , j , gridWidth ) ; } } // int s2 = (int)Math.round(ret.width*borderFraction); // int s5 = s2+square*(gridWidth-1); // // int N = gridWidth*gridWidth-4; // for (int i = 0; i < N; i++) { // if( (value& (1<<i)) != 0 ) // continue; // // int where = index(i, gridWidth); // int x = where%gridWidth; // int y = gridWidth-1-(where/gridWidth); // // x = s2 + square*x; // y = s2 + square*y; // // ImageMiscOps.fillRectangle(ret,0xFF,x,y,square,square); // } // ImageMiscOps.fillRectangle(ret,0xFF,s2,s2,square,square); // ImageMiscOps.fillRectangle(ret,0xFF,s5,s5,square,square); // ImageMiscOps.fillRectangle(ret,0xFF,s5,s2,square,square); }
Renders a binary square fiducial
406
8
27,188
public void setConfiguration ( Se3_F64 planeToCamera , CameraPinholeBrown intrinsic ) { this . planeToCamera = planeToCamera ; normToPixel = LensDistortionFactory . narrow ( intrinsic ) . distort_F64 ( false , true ) ; pixelToNorm = LensDistortionFactory . narrow ( intrinsic ) . undistort_F64 ( true , false ) ; planeToCamera . invert ( cameraToPlane ) ; }
Configures the camera's intrinsic and extrinsic parameters
95
11
27,189
/**
 * Configures the camera's intrinsic parameters by rebuilding the normalized-to-pixel
 * and pixel-to-normalized coordinate transforms.
 *
 * @param intrinsic camera intrinsic parameters, including lens distortion
 */
public void setIntrinsic( CameraPinholeBrown intrinsic ) {
	// pixel -> normalized (undistort) and normalized -> pixel (distort) transforms
	pixelToNorm = LensDistortionFactory.narrow(intrinsic).undistort_F64(true, false);
	normToPixel = LensDistortionFactory.narrow(intrinsic).distort_F64(false, true);
}
Configures the camera's intrinsic parameters
66
7
27,190
// Specifies the camera's extrinsic parameters (plane-to-camera transform).
// The reference is stored, not copied. When computeInverse is true the cached
// camera-to-plane transform is also updated; pass false if the caller maintains it.
public void setPlaneToCamera ( Se3_F64 planeToCamera , boolean computeInverse ) { this . planeToCamera = planeToCamera ; if ( computeInverse ) planeToCamera . invert ( cameraToPlane ) ; }
Specifies camera s extrinsic parameters .
53
9
27,191
public boolean planeToPixel ( double pointX , double pointY , Point2D_F64 pixel ) { // convert it into a 3D coordinate and transform into camera reference frame plain3D . set ( - pointY , 0 , pointX ) ; SePointOps_F64 . transform ( planeToCamera , plain3D , camera3D ) ; // if it's behind the camera it can't be seen if ( camera3D . z <= 0 ) return false ; // normalized image coordinates and convert into pixels double normX = camera3D . x / camera3D . z ; double normY = camera3D . y / camera3D . z ; normToPixel . compute ( normX , normY , pixel ) ; return true ; }
Given a point on the plane find the pixel in the image .
159
13
27,192
public boolean planeToNormalized ( double pointX , double pointY , Point2D_F64 normalized ) { // convert it into a 3D coordinate and transform into camera reference frame plain3D . set ( - pointY , 0 , pointX ) ; SePointOps_F64 . transform ( planeToCamera , plain3D , camera3D ) ; // if it's behind the camera it can't be seen if ( camera3D . z <= 0 ) return false ; // normalized image coordinates and convert into pixels normalized . x = camera3D . x / camera3D . z ; normalized . y = camera3D . y / camera3D . z ; return true ; }
Given a point on the plane find the normalized image coordinate
145
11
27,193
public void convert ( FeatureGraph2D graph ) { graph . nodes . resize ( corners . size ) ; graph . reset ( ) ; for ( int i = 0 ; i < corners . size ; i ++ ) { Node c = corners . get ( i ) ; FeatureGraph2D . Node n = graph . nodes . grow ( ) ; n . reset ( ) ; n . set ( c . x , c . y ) ; n . index = c . index ; } for ( int i = 0 ; i < corners . size ; i ++ ) { Node c = corners . get ( i ) ; for ( int j = 0 ; j < 4 ; j ++ ) { if ( c . edges [ j ] == null ) continue ; graph . connect ( c . index , c . edges [ j ] . index ) ; } } }
Convert into a generic graph .
175
7
27,194
public boolean process ( I frame ) { keyFrame = false ; // update the feature tracker tracker . process ( frame ) ; totalFramesProcessed ++ ; List < PointTrack > tracks = tracker . getActiveTracks ( null ) ; if ( tracks . size ( ) == 0 ) return false ; List < AssociatedPair > pairs = new ArrayList <> ( ) ; for ( PointTrack t : tracks ) { pairs . add ( ( AssociatedPair ) t . getCookie ( ) ) ; } // fit the motion model to the feature tracks if ( ! modelMatcher . process ( ( List ) pairs ) ) { return false ; } if ( modelRefiner != null ) { if ( ! modelRefiner . fitModel ( modelMatcher . getMatchSet ( ) , modelMatcher . getModelParameters ( ) , keyToCurr ) ) return false ; } else { keyToCurr . set ( modelMatcher . getModelParameters ( ) ) ; } // mark that the track is in the inlier set for ( AssociatedPair p : modelMatcher . getMatchSet ( ) ) { ( ( AssociatedPairTrack ) p ) . lastUsed = totalFramesProcessed ; } // prune tracks which aren't being used pruneUnusedTracks ( ) ; // Update the motion worldToKey . concat ( keyToCurr , worldToCurr ) ; return true ; }
Processes the next frame in the sequence .
296
9
27,195
public void changeKeyFrame ( ) { // drop all inactive tracks since their location is unknown in the current frame List < PointTrack > inactive = tracker . getInactiveTracks ( null ) ; for ( PointTrack l : inactive ) { tracker . dropTrack ( l ) ; } // set the keyframe for active tracks as their current location List < PointTrack > active = tracker . getActiveTracks ( null ) ; for ( PointTrack l : active ) { AssociatedPairTrack p = l . getCookie ( ) ; p . p1 . set ( l ) ; p . lastUsed = totalFramesProcessed ; } tracker . spawnTracks ( ) ; List < PointTrack > spawned = tracker . getNewTracks ( null ) ; for ( PointTrack l : spawned ) { AssociatedPairTrack p = l . getCookie ( ) ; if ( p == null ) { l . cookie = p = new AssociatedPairTrack ( ) ; // little bit of trickery here. Save the reference so that the point // in the current frame is updated for free as PointTrack is p . p2 = l ; } p . p1 . set ( l ) ; p . lastUsed = totalFramesProcessed ; } worldToKey . set ( worldToCurr ) ; keyToCurr . reset ( ) ; keyFrame = true ; }
Change the current frame into the keyframe . p1 location of existing tracks is set to their current location and new tracks are spawned . Reference frame transformations are also updated
284
33
27,196
/**
 * Automatically rescales the point cloud, in place, so that the largest axis standard
 * deviation equals the target value.
 *
 * @param cloud point cloud to rescale. Modified.
 * @param target desired standard deviation along the most spread-out axis
 * @return the scale factor that was applied
 */
public static double autoScale( List<Point3D_F64> cloud, double target ) {
	Point3D_F64 mean = new Point3D_F64();
	Point3D_F64 stdev = new Point3D_F64();
	statistics(cloud, mean, stdev);

	// scale so the largest per-axis standard deviation becomes 'target'
	double largest = Math.max(Math.max(stdev.x, stdev.y), stdev.z);
	double scale = target/largest;

	for (int i = 0; i < cloud.size(); i++) {
		cloud.get(i).scale(scale);
	}

	return scale;
}
Automatically rescales the point cloud so that it has a standard deviation equal to the target value
141
17
27,197
// Computes the mean and standard deviation of each axis of the point cloud, each axis
// treated independently. The stdev uses the population form (divide by N, not N-1).
// NOTE(review): results are ACCUMULATED with += — assumes 'mean' and 'stdev' are
// zero-initialized on input (true for a fresh Point3D_F64); confirm at call sites.
// NOTE(review): an empty cloud (N == 0) produces NaN via division by zero.
public static void statistics ( List < Point3D_F64 > cloud , Point3D_F64 mean , Point3D_F64 stdev ) { final int N = cloud . size ( ) ; for ( int i = 0 ; i < N ; i ++ ) { Point3D_F64 p = cloud . get ( i ) ; mean . x += p . x / N ; mean . y += p . y / N ; mean . z += p . z / N ; } for ( int i = 0 ; i < N ; i ++ ) { Point3D_F64 p = cloud . get ( i ) ; double dx = p . x - mean . x ; double dy = p . y - mean . y ; double dz = p . z - mean . z ; stdev . x += dx * dx / N ; stdev . y += dy * dy / N ; stdev . z += dz * dz / N ; } stdev . x = Math . sqrt ( stdev . x ) ; stdev . y = Math . sqrt ( stdev . y ) ; stdev . z = Math . sqrt ( stdev . z ) ; }
Computes the mean and standard deviation of each axis in the point cloud , each axis computed independently
254
18
27,198
// Prunes points from the cloud (in place) that have fewer than 'minNeighbors' neighbors
// within 'radius'. Implementation notes: minNeighbors is incremented by one because the
// nearest-neighbor search always finds the query point itself; radius is squared because
// the kd-tree search uses squared Euclidean distance; the loop runs BACKWARDS so that
// remove(i) does not shift indexes of points not yet visited.
// NOTE(review): List.remove(i) on an ArrayList makes this O(n^2) in the worst case, and
// the raw FastQueue construction is unchecked — both presumably accepted trade-offs here.
public static void prune ( List < Point3D_F64 > cloud , int minNeighbors , double radius ) { if ( minNeighbors < 0 ) throw new IllegalArgumentException ( "minNeighbors must be >= 0" ) ; NearestNeighbor < Point3D_F64 > nn = FactoryNearestNeighbor . kdtree ( new KdTreePoint3D_F64 ( ) ) ; NearestNeighbor . Search < Point3D_F64 > search = nn . createSearch ( ) ; nn . setPoints ( cloud , false ) ; FastQueue < NnData < Point3D_F64 > > results = new FastQueue ( NnData . class , true ) ; // It will always find itself minNeighbors += 1 ; // distance is Euclidean squared radius *= radius ; for ( int i = cloud . size ( ) - 1 ; i >= 0 ; i -- ) { search . findNearest ( cloud . get ( i ) , radius , minNeighbors , results ) ; if ( results . size < minNeighbors ) { cloud . remove ( i ) ; } } }
Prunes points from the point cloud if they have very few neighbors
244
13
27,199
public static void computeNormalizationLL ( List < List < Point2D_F64 > > points , NormalizationPoint2D normalize ) { double meanX = 0 ; double meanY = 0 ; int count = 0 ; for ( int i = 0 ; i < points . size ( ) ; i ++ ) { List < Point2D_F64 > l = points . get ( i ) ; for ( int j = 0 ; j < l . size ( ) ; j ++ ) { Point2D_F64 p = l . get ( j ) ; meanX += p . x ; meanY += p . y ; } count += l . size ( ) ; } meanX /= count ; meanY /= count ; double stdX = 0 ; double stdY = 0 ; for ( int i = 0 ; i < points . size ( ) ; i ++ ) { List < Point2D_F64 > l = points . get ( i ) ; for ( int j = 0 ; j < l . size ( ) ; j ++ ) { Point2D_F64 p = l . get ( j ) ; double dx = p . x - meanX ; double dy = p . y - meanY ; stdX += dx * dx ; stdY += dy * dy ; } } normalize . meanX = meanX ; normalize . meanY = meanY ; normalize . stdX = Math . sqrt ( stdX / count ) ; normalize . stdY = Math . sqrt ( stdY / count ) ; }
Computes normalization when points are contained in a list of lists
327
13